From ffed5c1bcc32a65ba7b2c2ef55e9f865da55cfba Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Tue, 14 Apr 2026 13:31:25 +0200 Subject: [PATCH 1/7] refactor: migrate jobs and feature flags from postgres to sqlc Migrate the last remaining database/postgres/ repository implementations to sqlc-based modules under internal/, completing the postgres-to-sqlc migration. - Create internal/jobs/ with sqlc queries, Service implementing datastore.JobRepository (9 methods), and comprehensive tests - Create internal/feature_flags/ with sqlc queries across 3 tables (feature_flags, feature_flag_overrides, early_adopter_features), Service implementing both fflag.FeatureFlagFetcher and fflag.EarlyAdopterFeatureFetcher, and comprehensive tests - Update all call sites: dataplane worker, server init, API handlers, CLI utilities, and test files - Delete database/postgres/job.go, feature_flag.go, feature_flag_fetcher.go, early_adopter_feature_fetcher.go --- api/handlers/organisation.go | 42 +- api/oauth2_integration_test.go | 6 +- api/oss_login_integration_test.go | 7 +- api/server_suite_test.go | 6 +- cmd/server/server.go | 10 +- cmd/utils/org_feature_flags.go | 23 +- .../postgres/early_adopter_feature_fetcher.go | 34 -- database/postgres/feature_flag.go | 278 ----------- database/postgres/feature_flag_fetcher.go | 47 -- database/postgres/job.go | 370 -------------- database/postgres/job_test.go | 345 -------------- datastore/models.go | 4 + docs/jobs-and-feature-flags-sqlc-migration.md | 44 ++ e2e/oauth2_e2e_test.go | 7 +- internal/dataplane/worker.go | 20 +- internal/feature_flags/impl.go | 428 +++++++++++++++++ internal/feature_flags/impl_test.go | 355 ++++++++++++++ internal/feature_flags/queries.sql | 80 ++++ internal/feature_flags/repo/db.go | 32 ++ internal/feature_flags/repo/models.go | 5 + internal/feature_flags/repo/querier.go | 29 ++ internal/feature_flags/repo/queries.sql.go | 450 ++++++++++++++++++ internal/jobs/impl.go | 292 ++++++++++++ internal/jobs/impl_test.go | 316 
++++++++++++ internal/jobs/queries.sql | 80 ++++ internal/jobs/repo/db.go | 32 ++ internal/jobs/repo/models.go | 5 + internal/jobs/repo/querier.go | 31 ++ internal/jobs/repo/queries.sql.go | 350 ++++++++++++++ sqlc.yaml | 20 + 30 files changed, 2624 insertions(+), 1124 deletions(-) delete mode 100644 database/postgres/early_adopter_feature_fetcher.go delete mode 100644 database/postgres/feature_flag.go delete mode 100644 database/postgres/feature_flag_fetcher.go delete mode 100644 database/postgres/job.go delete mode 100644 database/postgres/job_test.go create mode 100644 docs/jobs-and-feature-flags-sqlc-migration.md create mode 100644 internal/feature_flags/impl.go create mode 100644 internal/feature_flags/impl_test.go create mode 100644 internal/feature_flags/queries.sql create mode 100644 internal/feature_flags/repo/db.go create mode 100644 internal/feature_flags/repo/models.go create mode 100644 internal/feature_flags/repo/querier.go create mode 100644 internal/feature_flags/repo/queries.sql.go create mode 100644 internal/jobs/impl.go create mode 100644 internal/jobs/impl_test.go create mode 100644 internal/jobs/queries.sql create mode 100644 internal/jobs/repo/db.go create mode 100644 internal/jobs/repo/models.go create mode 100644 internal/jobs/repo/querier.go create mode 100644 internal/jobs/repo/queries.sql.go diff --git a/api/handlers/organisation.go b/api/handlers/organisation.go index 9921321e9d..abbaa2795b 100644 --- a/api/handlers/organisation.go +++ b/api/handlers/organisation.go @@ -15,9 +15,9 @@ import ( "github.com/frain-dev/convoy/api/models" "github.com/frain-dev/convoy/api/policies" "github.com/frain-dev/convoy/auth" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" "github.com/frain-dev/convoy/internal/event_deliveries" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/organisation_members" "github.com/frain-dev/convoy/internal/organisations" 
"github.com/frain-dev/convoy/internal/pkg/batch_tracker" @@ -289,7 +289,8 @@ func (h *Handler) GetEarlyAdopterFeatures(w http.ResponseWriter, r *http.Request features := fflag.GetEarlyAdopterFeatures() responseFeatures := make([]models.EarlyAdopterFeature, 0, len(features)) - earlyAdopterFeatures, err := postgres.LoadEarlyAdopterFeaturesByOrg(r.Context(), h.A.DB, org.UID) + ffService := feature_flags.New(h.A.Logger, h.A.DB) + earlyAdopterFeatures, err := ffService.LoadEarlyAdopterFeaturesByOrg(r.Context(), org.UID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return @@ -378,7 +379,8 @@ func (h *Handler) updateFeatureFlag(w http.ResponseWriter, r *http.Request, feat feature.EnabledAt = null.TimeFrom(time.Now()) } - err := postgres.UpsertEarlyAdopterFeature(r.Context(), h.A.DB, feature) + ffService := feature_flags.New(h.A.Logger, h.A.DB) + err := ffService.UpsertEarlyAdopterFeature(r.Context(), feature) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return err @@ -423,7 +425,8 @@ func (h *Handler) GetAllFeatureFlags(w http.ResponseWriter, r *http.Request) { return } - flags, err := postgres.LoadFeatureFlags(r.Context(), h.A.DB) + ffService := feature_flags.New(h.A.Logger, h.A.DB) + flags, err := ffService.LoadFeatureFlags(r.Context()) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return @@ -482,7 +485,8 @@ func (h *Handler) GetOrganisationOverrides(w http.ResponseWriter, r *http.Reques return } - overrides, err := postgres.LoadFeatureFlagOverridesByOwner(r.Context(), h.A.DB, "organisation", orgID) + ffService := feature_flags.New(h.A.Logger, h.A.DB) + overrides, err := ffService.LoadFeatureFlagOverridesByOwner(r.Context(), "organisation", orgID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return @@ -496,7 +500,7 @@ func (h *Handler) GetOrganisationOverrides(w http.ResponseWriter, r *http.Reques enrichedOverrides := make([]OverrideWithKey, 0, len(overrides)) 
for i := range overrides { - featureFlag, err := postgres.FetchFeatureFlagByID(r.Context(), h.A.DB, overrides[i].FeatureFlagID) + featureFlag, err := ffService.FetchFeatureFlagByID(r.Context(), overrides[i].FeatureFlagID) if err != nil { h.A.Logger.WarnContext(r.Context(), fmt.Sprintf("Failed to fetch feature flag for override: %s: %v", overrides[i].FeatureFlagID, err)) continue @@ -537,10 +541,12 @@ func (h *Handler) UpdateOrganisationOverride(w http.ResponseWriter, r *http.Requ return } + ffService := feature_flags.New(h.A.Logger, h.A.DB) + // Fetch the feature flag - featureFlag, err := postgres.FetchFeatureFlagByKey(r.Context(), h.A.DB, overrideRequest.FeatureKey) + featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), overrideRequest.FeatureKey) if err != nil { - if errors.Is(err, postgres.ErrFeatureFlagNotFound) { + if errors.Is(err, datastore.ErrFeatureFlagNotFound) { _ = render.Render(w, r, util.NewErrorResponse("Feature flag not found: "+overrideRequest.FeatureKey, http.StatusBadRequest)) return } @@ -566,7 +572,7 @@ func (h *Handler) UpdateOrganisationOverride(w http.ResponseWriter, r *http.Requ override.EnabledAt = null.TimeFrom(time.Now()) } - err = postgres.UpsertFeatureFlagOverride(r.Context(), h.A.DB, override) + err = ffService.UpsertFeatureFlagOverride(r.Context(), override) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return @@ -594,10 +600,12 @@ func (h *Handler) DeleteOrganisationOverride(w http.ResponseWriter, r *http.Requ return } + ffService := feature_flags.New(h.A.Logger, h.A.DB) + // Fetch the feature flag to get its ID - featureFlag, err := postgres.FetchFeatureFlagByKey(r.Context(), h.A.DB, featureKey) + featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), featureKey) if err != nil { - if errors.Is(err, postgres.ErrFeatureFlagNotFound) { + if errors.Is(err, datastore.ErrFeatureFlagNotFound) { _ = render.Render(w, r, util.NewErrorResponse("Feature flag not found: "+featureKey, 
http.StatusBadRequest)) return } @@ -605,7 +613,7 @@ func (h *Handler) DeleteOrganisationOverride(w http.ResponseWriter, r *http.Requ return } - err = postgres.DeleteFeatureFlagOverride(r.Context(), h.A.DB, "organisation", orgID, featureFlag.UID) + err = ffService.DeleteFeatureFlagOverride(r.Context(), "organisation", orgID, featureFlag.UID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return @@ -904,10 +912,12 @@ func (h *Handler) UpdateFeatureFlag(w http.ResponseWriter, r *http.Request) { return } + ffService := feature_flags.New(h.A.Logger, h.A.DB) + // Fetch the feature flag - featureFlag, err := postgres.FetchFeatureFlagByKey(r.Context(), h.A.DB, featureKey) + featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), featureKey) if err != nil { - if errors.Is(err, postgres.ErrFeatureFlagNotFound) { + if errors.Is(err, datastore.ErrFeatureFlagNotFound) { _ = render.Render(w, r, util.NewErrorResponse("Feature flag not found: "+featureKey, http.StatusBadRequest)) return } @@ -917,14 +927,14 @@ func (h *Handler) UpdateFeatureFlag(w http.ResponseWriter, r *http.Request) { // Update enabled state if provided if updateRequest.Enabled != nil { - err = postgres.UpdateFeatureFlag(r.Context(), h.A.DB, featureFlag.UID, *updateRequest.Enabled) + err = ffService.UpdateFeatureFlag(r.Context(), featureFlag.UID, *updateRequest.Enabled) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return } } - updatedFlag, err := postgres.FetchFeatureFlagByID(r.Context(), h.A.DB, featureFlag.UID) + updatedFlag, err := ffService.FetchFeatureFlagByID(r.Context(), featureFlag.UID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return diff --git a/api/oauth2_integration_test.go b/api/oauth2_integration_test.go index eb039a39fb..1486a2d322 100644 --- a/api/oauth2_integration_test.go +++ b/api/oauth2_integration_test.go @@ -25,10 +25,10 @@ import ( mcache "github.com/frain-dev/convoy/cache/memory" 
"github.com/frain-dev/convoy/config" "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" "github.com/frain-dev/convoy/internal/api_keys" "github.com/frain-dev/convoy/internal/endpoints" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/fflag" "github.com/frain-dev/convoy/internal/pkg/keys" "github.com/frain-dev/convoy/internal/pkg/metrics" @@ -589,6 +589,8 @@ func (s *OAuth2IntegrationTestSuite) Test_CreateEndpoint_WithOAuth2ClientAsserti func enableOAuth2FeatureFlag(t *testing.T, db database.Database, orgID string) error { t.Helper() + ffService := feature_flags.New(log.New("convoy", log.LevelError), db) + // Create or update early adopter feature feature := &datastore.EarlyAdopterFeature{ OrganisationID: orgID, @@ -597,7 +599,7 @@ func enableOAuth2FeatureFlag(t *testing.T, db database.Database, orgID string) e EnabledAt: null.TimeFrom(time.Now()), } - return postgres.UpsertEarlyAdopterFeature(context.Background(), db, feature) + return ffService.UpsertEarlyAdopterFeature(context.Background(), feature) } func TestOAuth2IntegrationTestSuite(t *testing.T) { diff --git a/api/oss_login_integration_test.go b/api/oss_login_integration_test.go index 9b78997dba..c761c12457 100644 --- a/api/oss_login_integration_test.go +++ b/api/oss_login_integration_test.go @@ -16,10 +16,10 @@ import ( "github.com/frain-dev/convoy/api/types" rcache "github.com/frain-dev/convoy/cache/redis" "github.com/frain-dev/convoy/config" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" "github.com/frain-dev/convoy/internal/api_keys" "github.com/frain-dev/convoy/internal/configuration" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/fflag" rlimiter "github.com/frain-dev/convoy/internal/pkg/limiter/redis" "github.com/frain-dev/convoy/internal/portal_links" @@ -134,6 +134,7 @@ func (s 
*OSSLoginIntegrationTestSuite) buildServerWithMockLicenser(t *testing.T, noopCache := rcache.NewRedisCacheFromClient(tl.Redis) limiter := rlimiter.NewLimiterFromRedisClient(tl.Redis) + ffService := feature_flags.New(tl.Logger, db) ah, err := NewApplicationHandler( &types.APIOptions{ @@ -143,8 +144,8 @@ func (s *OSSLoginIntegrationTestSuite) buildServerWithMockLicenser(t *testing.T, Logger: tl.Logger, Cache: noopCache, FFlag: fflag.NewFFlag([]string{string(fflag.Prometheus), string(fflag.FullTextSearch)}), - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(db), - EarlyAdopterFeatureFetcher: postgres.NewEarlyAdopterFeatureFetcher(db), + FeatureFlagFetcher: ffService, + EarlyAdopterFeatureFetcher: ffService, Rate: limiter, ConfigRepo: configuration.New(tl.Logger, db), Licenser: licenser, diff --git a/api/server_suite_test.go b/api/server_suite_test.go index eb872a2822..8c96ab5147 100644 --- a/api/server_suite_test.go +++ b/api/server_suite_test.go @@ -30,6 +30,7 @@ import ( "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" "github.com/frain-dev/convoy/internal/configuration" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/fflag" "github.com/frain-dev/convoy/internal/pkg/keys" noopLicenser "github.com/frain-dev/convoy/internal/pkg/license/noop" @@ -182,6 +183,7 @@ func buildServer(t *testing.T) *ApplicationHandler { noopCache := rcache.NewRedisCacheFromClient(tl.Redis) limiter := rlimiter.NewLimiterFromRedisClient(tl.Redis) + ffService := feature_flags.New(tl.Logger, db) ah, err := NewApplicationHandler( &types.APIOptions{ @@ -191,8 +193,8 @@ func buildServer(t *testing.T) *ApplicationHandler { Logger: tl.Logger, Cache: noopCache, FFlag: fflag.NewFFlag([]string{string(fflag.Prometheus), string(fflag.FullTextSearch)}), - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(db), - EarlyAdopterFeatureFetcher: postgres.NewEarlyAdopterFeatureFetcher(db), + FeatureFlagFetcher: ffService, + 
EarlyAdopterFeatureFetcher: ffService, Rate: limiter, ConfigRepo: configuration.New(tl.Logger, db), Licenser: noopLicenser.NewLicenser(), diff --git a/cmd/server/server.go b/cmd/server/server.go index 75ce5183e2..5478f5f8d8 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -13,9 +13,9 @@ import ( "github.com/frain-dev/convoy/api/types" "github.com/frain-dev/convoy/auth/realm_chain" "github.com/frain-dev/convoy/config" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/internal/api_keys" "github.com/frain-dev/convoy/internal/configuration" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/cli" "github.com/frain-dev/convoy/internal/pkg/fflag" "github.com/frain-dev/convoy/internal/pkg/keys" @@ -126,8 +126,6 @@ func StartConvoyServer(a *cli.App) error { } flag := fflag.NewFFlag(cfg.EnableFeatureFlag) - featureFlagFetcher := postgres.NewFeatureFlagFetcher(a.DB) - earlyAdopterFeatureFetcher := postgres.NewEarlyAdopterFeatureFetcher(a.DB) if cfg.Server.HTTP.Port <= 0 { return errors.New("please provide the HTTP port in the convoy.json file") @@ -135,13 +133,15 @@ func StartConvoyServer(a *cli.App) error { lo := a.Logger + featureFlagSvc := feature_flags.New(lo, a.DB) + srv := server.NewServer(cfg.Server.HTTP.Port, func() {}) handler, err := api.NewApplicationHandler( &types.APIOptions{ FFlag: flag, - FeatureFlagFetcher: featureFlagFetcher, - EarlyAdopterFeatureFetcher: earlyAdopterFeatureFetcher, + FeatureFlagFetcher: featureFlagSvc, + EarlyAdopterFeatureFetcher: featureFlagSvc, DB: a.DB, Queue: a.Queue, Logger: lo, diff --git a/cmd/utils/org_feature_flags.go b/cmd/utils/org_feature_flags.go index a375e97f16..a3ba805701 100644 --- a/cmd/utils/org_feature_flags.go +++ b/cmd/utils/org_feature_flags.go @@ -12,9 +12,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/guregu/null.v4" - "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/database/postgres" 
"github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/organisations" "github.com/frain-dev/convoy/internal/pkg/cli" "github.com/frain-dev/convoy/internal/pkg/fflag" @@ -51,10 +50,12 @@ func AddUpdateOrgFeatureFlagsCommand(a *cli.App) *cobra.Command { return fmt.Errorf("failed to fetch organisation: %w", err) } + ffService := feature_flags.New(a.Logger, db) + slog.Info(fmt.Sprintf("Updating feature flags for organisation: %s (%s)", org.Name, org.UID)) - errorList := processFeatureFlags(context.Background(), db, orgID, enableFlags, true) - errorList = append(errorList, processFeatureFlags(context.Background(), db, orgID, disableFlags, false)...) + errorList := processFeatureFlags(context.Background(), ffService, orgID, enableFlags, true) + errorList = append(errorList, processFeatureFlags(context.Background(), ffService, orgID, disableFlags, false)...) if len(errorList) > 0 { slog.Error(fmt.Sprintf("Encountered %d errors:", len(errorList))) @@ -79,7 +80,7 @@ func AddUpdateOrgFeatureFlagsCommand(a *cli.App) *cobra.Command { return cmd } -func processFeatureFlags(ctx context.Context, db database.Database, orgID string, flags []string, enabled bool) []string { +func processFeatureFlags(ctx context.Context, ffService *feature_flags.Service, orgID string, flags []string, enabled bool) []string { var errorList []string for _, flag := range flags { flagKey := strings.ToLower(strings.TrimSpace(flag)) @@ -88,7 +89,7 @@ func processFeatureFlags(ctx context.Context, db database.Database, orgID string continue } - err := updateOrgFeatureFlag(ctx, db, orgID, flagKey, enabled) + err := updateOrgFeatureFlag(ctx, ffService, orgID, flagKey, enabled) if err != nil { action := "enable" if !enabled { @@ -107,10 +108,10 @@ func processFeatureFlags(ctx context.Context, db database.Database, orgID string return errorList } -func updateOrgFeatureFlag(ctx context.Context, db database.Database, orgID, flagKey 
string, enabled bool) error { - featureFlag, err := postgres.FetchFeatureFlagByKey(ctx, db, flagKey) +func updateOrgFeatureFlag(ctx context.Context, ffService *feature_flags.Service, orgID, flagKey string, enabled bool) error { + featureFlag, err := ffService.FetchFeatureFlagByKey(ctx, flagKey) if err != nil { - if errors.Is(err, postgres.ErrFeatureFlagNotFound) { + if errors.Is(err, datastore.ErrFeatureFlagNotFound) { return fmt.Errorf("%w: %s", ErrFeatureFlagNotFound, flagKey) } return err @@ -126,7 +127,7 @@ func updateOrgFeatureFlag(ctx context.Context, db database.Database, orgID, flag if enabled { feature.EnabledAt = null.TimeFrom(time.Now()) } - return postgres.UpsertEarlyAdopterFeature(ctx, db, feature) + return ffService.UpsertEarlyAdopterFeature(ctx, feature) } override := &datastore.FeatureFlagOverride{ @@ -141,7 +142,7 @@ func updateOrgFeatureFlag(ctx context.Context, db database.Database, orgID, flag override.EnabledAt = null.TimeFrom(time.Now()) } - return postgres.UpsertFeatureFlagOverride(ctx, db, override) + return ffService.UpsertFeatureFlagOverride(ctx, override) } func isValidFeatureFlag(flagKey string) bool { diff --git a/database/postgres/early_adopter_feature_fetcher.go b/database/postgres/early_adopter_feature_fetcher.go deleted file mode 100644 index 10fffd41f8..0000000000 --- a/database/postgres/early_adopter_feature_fetcher.go +++ /dev/null @@ -1,34 +0,0 @@ -package postgres - -import ( - "context" - "errors" - - "github.com/frain-dev/convoy/database" - fflag "github.com/frain-dev/convoy/internal/pkg/fflag" -) - -// EarlyAdopterFeatureFetcherImpl implements fflag.EarlyAdopterFeatureFetcher -type EarlyAdopterFeatureFetcherImpl struct { - db database.Database -} - -// NewEarlyAdopterFeatureFetcher creates a new EarlyAdopterFeatureFetcher -func NewEarlyAdopterFeatureFetcher(db database.Database) fflag.EarlyAdopterFeatureFetcher { - return &EarlyAdopterFeatureFetcherImpl{db: db} -} - -// FetchEarlyAdopterFeature fetches an early adopter 
feature for an organisation -func (f *EarlyAdopterFeatureFetcherImpl) FetchEarlyAdopterFeature(ctx context.Context, orgID, featureKey string) (*fflag.EarlyAdopterFeatureInfo, error) { - feature, err := FetchEarlyAdopterFeature(ctx, f.db, orgID, featureKey) - if err != nil { - if errors.Is(err, ErrEarlyAdopterFeatureNotFound) { - return nil, err - } - return nil, err - } - - return &fflag.EarlyAdopterFeatureInfo{ - Enabled: feature.Enabled, - }, nil -} diff --git a/database/postgres/feature_flag.go b/database/postgres/feature_flag.go deleted file mode 100644 index 9a90cd0d1e..0000000000 --- a/database/postgres/feature_flag.go +++ /dev/null @@ -1,278 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "errors" - "time" - - "github.com/oklog/ulid/v2" - - "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/datastore" -) - -var ( - ErrFeatureFlagNotFound = errors.New("feature flag not found") - ErrFeatureFlagOverrideNotFound = errors.New("feature flag override not found") - ErrEarlyAdopterFeatureNotFound = errors.New("early adopter feature not found") -) - -const ( - fetchFeatureFlagByKey = ` - SELECT * FROM convoy.feature_flags - WHERE feature_key = $1; - ` - - fetchFeatureFlagByID = ` - SELECT * FROM convoy.feature_flags - WHERE id = $1; - ` - - loadFeatureFlags = ` - SELECT * FROM convoy.feature_flags - ORDER BY feature_key; - ` - - fetchFeatureFlagOverride = ` - SELECT * FROM convoy.feature_flag_overrides - WHERE owner_type = $1 AND owner_id = $2 AND feature_flag_id = $3; - ` - - loadFeatureFlagOverridesByOwner = ` - SELECT * FROM convoy.feature_flag_overrides - WHERE owner_type = $1 AND owner_id = $2; - ` - - loadFeatureFlagOverridesByFeatureFlag = ` - SELECT * FROM convoy.feature_flag_overrides - WHERE feature_flag_id = $1; - ` -) - -// FetchFeatureFlagByKey fetches a feature flag by its key -func FetchFeatureFlagByKey(ctx context.Context, db database.Database, key string) (*datastore.FeatureFlag, error) { - flag := 
&datastore.FeatureFlag{} - err := db.GetDB().QueryRowxContext(ctx, fetchFeatureFlagByKey, key).StructScan(flag) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, ErrFeatureFlagNotFound - } - return nil, err - } - - return flag, nil -} - -// FetchFeatureFlagByID fetches a feature flag by its ID -func FetchFeatureFlagByID(ctx context.Context, db database.Database, id string) (*datastore.FeatureFlag, error) { - flag := &datastore.FeatureFlag{} - err := db.GetDB().QueryRowxContext(ctx, fetchFeatureFlagByID, id).StructScan(flag) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, ErrFeatureFlagNotFound - } - return nil, err - } - - return flag, nil -} - -// LoadFeatureFlags fetches all feature flags -func LoadFeatureFlags(ctx context.Context, db database.Database) ([]datastore.FeatureFlag, error) { - flags := []datastore.FeatureFlag{} - err := db.GetDB().SelectContext(ctx, &flags, loadFeatureFlags) - if err != nil { - return nil, err - } - - return flags, nil -} - -// FetchFeatureFlagOverride fetches a feature flag override for a specific owner -func FetchFeatureFlagOverride(ctx context.Context, db database.Database, ownerType, ownerID, featureFlagID string) (*datastore.FeatureFlagOverride, error) { - override := &datastore.FeatureFlagOverride{} - err := db.GetDB().QueryRowxContext(ctx, fetchFeatureFlagOverride, ownerType, ownerID, featureFlagID).StructScan(override) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, ErrFeatureFlagOverrideNotFound - } - return nil, err - } - - return override, nil -} - -// LoadFeatureFlagOverridesByOwner fetches all feature flag overrides for a specific owner -func LoadFeatureFlagOverridesByOwner(ctx context.Context, db database.Database, ownerType, ownerID string) ([]datastore.FeatureFlagOverride, error) { - overrides := []datastore.FeatureFlagOverride{} - err := db.GetDB().SelectContext(ctx, &overrides, loadFeatureFlagOverridesByOwner, ownerType, ownerID) - if err != nil { - return 
nil, err - } - - return overrides, nil -} - -// LoadFeatureFlagOverridesByFeatureFlag fetches all overrides for a specific feature flag -func LoadFeatureFlagOverridesByFeatureFlag(ctx context.Context, db database.Database, featureFlagID string) ([]datastore.FeatureFlagOverride, error) { - overrides := []datastore.FeatureFlagOverride{} - err := db.GetDB().SelectContext(ctx, &overrides, loadFeatureFlagOverridesByFeatureFlag, featureFlagID) - if err != nil { - return nil, err - } - - return overrides, nil -} - -const ( - createFeatureFlagOverride = ` - INSERT INTO convoy.feature_flag_overrides (id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by) - VALUES ($1, $2, $3, $4, $5, $6, $7) - ON CONFLICT (owner_type, owner_id, feature_flag_id) - DO UPDATE SET enabled = $5, enabled_at = $6, enabled_by = $7, updated_at = NOW(); - ` - - deleteFeatureFlagOverride = ` - DELETE FROM convoy.feature_flag_overrides - WHERE owner_type = $1 AND owner_id = $2 AND feature_flag_id = $3; - ` - - updateFeatureFlag = ` - UPDATE convoy.feature_flags - SET enabled = $1, updated_at = NOW() - WHERE id = $2; - ` -) - -const ( - fetchEarlyAdopterFeature = ` - SELECT * FROM convoy.early_adopter_features - WHERE organisation_id = $1 AND feature_key = $2; - ` - - loadEarlyAdopterFeaturesByOrg = ` - SELECT * FROM convoy.early_adopter_features - WHERE organisation_id = $1 - ORDER BY feature_key; - ` - - upsertEarlyAdopterFeature = ` - INSERT INTO convoy.early_adopter_features (id, organisation_id, feature_key, enabled, enabled_by, enabled_at) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (organisation_id, feature_key) - DO UPDATE SET enabled = $4, enabled_by = $5, enabled_at = $6, updated_at = NOW(); - ` - - deleteEarlyAdopterFeature = ` - DELETE FROM convoy.early_adopter_features - WHERE organisation_id = $1 AND feature_key = $2; - ` -) - -// UpsertFeatureFlagOverride creates or updates a feature flag override -func UpsertFeatureFlagOverride(ctx context.Context, db 
database.Database, override *datastore.FeatureFlagOverride) error { - if override.UID == "" { - override.UID = ulid.Make().String() - } - - var enabledAt interface{} - if override.EnabledAt.Valid { - enabledAt = override.EnabledAt.Time - } else if override.Enabled { - enabledAt = time.Now() - } else { - enabledAt = nil - } - - var enabledBy interface{} - if override.EnabledBy.Valid { - enabledBy = override.EnabledBy.String - } else { - enabledBy = nil - } - - _, err := db.GetDB().ExecContext(ctx, createFeatureFlagOverride, - override.UID, override.FeatureFlagID, override.OwnerType, override.OwnerID, - override.Enabled, enabledAt, enabledBy) - if err != nil { - return err - } - - return nil -} - -// DeleteFeatureFlagOverride deletes a feature flag override -func DeleteFeatureFlagOverride(ctx context.Context, db database.Database, ownerType, ownerID, featureFlagID string) error { - _, err := db.GetDB().ExecContext(ctx, deleteFeatureFlagOverride, ownerType, ownerID, featureFlagID) - return err -} - -// UpdateFeatureFlag updates the enabled state of a feature flag -func UpdateFeatureFlag(ctx context.Context, db database.Database, featureFlagID string, enabled bool) error { - _, err := db.GetDB().ExecContext(ctx, updateFeatureFlag, enabled, featureFlagID) - return err -} - -// FetchEarlyAdopterFeature fetches an early adopter feature for an organisation -func FetchEarlyAdopterFeature(ctx context.Context, db database.Database, orgID, featureKey string) (*datastore.EarlyAdopterFeature, error) { - feature := &datastore.EarlyAdopterFeature{} - err := db.GetDB().QueryRowxContext(ctx, fetchEarlyAdopterFeature, orgID, featureKey).StructScan(feature) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, ErrEarlyAdopterFeatureNotFound - } - return nil, err - } - - return feature, nil -} - -// LoadEarlyAdopterFeaturesByOrg fetches all early adopter features for an organisation -func LoadEarlyAdopterFeaturesByOrg(ctx context.Context, db database.Database, orgID 
string) ([]datastore.EarlyAdopterFeature, error) { - features := []datastore.EarlyAdopterFeature{} - err := db.GetDB().SelectContext(ctx, &features, loadEarlyAdopterFeaturesByOrg, orgID) - if err != nil { - return nil, err - } - - return features, nil -} - -// UpsertEarlyAdopterFeature creates or updates an early adopter feature -func UpsertEarlyAdopterFeature(ctx context.Context, db database.Database, feature *datastore.EarlyAdopterFeature) error { - if feature.UID == "" { - feature.UID = ulid.Make().String() - } - - var enabledAt interface{} - if feature.EnabledAt.Valid { - enabledAt = feature.EnabledAt.Time - } else if feature.Enabled { - enabledAt = time.Now() - } else { - enabledAt = nil - } - - var enabledBy interface{} - if feature.EnabledBy.Valid { - enabledBy = feature.EnabledBy.String - } else { - enabledBy = nil - } - - _, err := db.GetDB().ExecContext(ctx, upsertEarlyAdopterFeature, - feature.UID, feature.OrganisationID, feature.FeatureKey, - feature.Enabled, enabledBy, enabledAt) - if err != nil { - return err - } - - return nil -} - -// DeleteEarlyAdopterFeature deletes an early adopter feature -func DeleteEarlyAdopterFeature(ctx context.Context, db database.Database, orgID, featureKey string) error { - _, err := db.GetDB().ExecContext(ctx, deleteEarlyAdopterFeature, orgID, featureKey) - return err -} diff --git a/database/postgres/feature_flag_fetcher.go b/database/postgres/feature_flag_fetcher.go deleted file mode 100644 index 876556d30c..0000000000 --- a/database/postgres/feature_flag_fetcher.go +++ /dev/null @@ -1,47 +0,0 @@ -package postgres - -import ( - "context" - "errors" - - "github.com/frain-dev/convoy/database" - fflag "github.com/frain-dev/convoy/internal/pkg/fflag" -) - -// FeatureFlagFetcherImpl implements fflag.FeatureFlagFetcher -type FeatureFlagFetcherImpl struct { - db database.Database -} - -// NewFeatureFlagFetcher creates a new FeatureFlagFetcher -func NewFeatureFlagFetcher(db database.Database) fflag.FeatureFlagFetcher { - 
return &FeatureFlagFetcherImpl{db: db} -} - -// FetchFeatureFlag fetches a feature flag by key -func (f *FeatureFlagFetcherImpl) FetchFeatureFlag(ctx context.Context, key string) (*fflag.FeatureFlagInfo, error) { - flag, err := FetchFeatureFlagByKey(ctx, f.db, key) - if err != nil { - if errors.Is(err, ErrFeatureFlagNotFound) { - return nil, err - } - return nil, err - } - - return &fflag.FeatureFlagInfo{ - UID: flag.UID, - Enabled: flag.Enabled, - }, nil -} - -// FetchFeatureFlagOverride fetches a feature flag override -func (f *FeatureFlagFetcherImpl) FetchFeatureFlagOverride(ctx context.Context, ownerType, ownerID, featureFlagID string) (*fflag.FeatureFlagOverrideInfo, error) { - override, err := FetchFeatureFlagOverride(ctx, f.db, ownerType, ownerID, featureFlagID) - if err != nil { - return nil, err - } - - return &fflag.FeatureFlagOverrideInfo{ - Enabled: override.Enabled, - }, nil -} diff --git a/database/postgres/job.go b/database/postgres/job.go deleted file mode 100644 index 1a7b967d18..0000000000 --- a/database/postgres/job.go +++ /dev/null @@ -1,370 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "github.com/jmoiron/sqlx" - - "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/datastore" -) - -var ( - ErrJobNotFound = errors.New("job not found") - ErrJobNotCreated = errors.New("job could not be created") - ErrJobNotUpdated = errors.New("job could not be updated") - ErrJobNotDeleted = errors.New("job could not be deleted") -) - -const ( - createJob = ` - INSERT INTO convoy.jobs (id, type, status, project_id) - VALUES ($1, $2, $3, $4) - ` - - updateJobStartedAt = ` - UPDATE convoy.jobs SET - status = 'running', - started_at = NOW(), - updated_at = NOW() - WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL; - ` - - updateJobCompletedAt = ` - UPDATE convoy.jobs SET - status = 'completed', - completed_at = NOW(), - updated_at = NOW() - WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL; 
- ` - - updateJobFailedAt = ` - UPDATE convoy.jobs SET - status = 'failed', - failed_at = NOW(), - updated_at = NOW() - WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL; - ` - - deleteJob = ` - UPDATE convoy.jobs SET - deleted_at = NOW() - WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL; - ` - - fetchJobById = ` - SELECT * FROM convoy.jobs - WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL; - ` - - fetchRunningJobsByProjectId = ` - SELECT * FROM convoy.jobs - WHERE status = 'running' - AND project_id = $1 - AND deleted_at IS NULL; - ` - - fetchJobsByProjectId = ` - SELECT * FROM convoy.jobs WHERE project_id = $1 AND deleted_at IS NULL; - ` - - fetchJobsPaginated = ` - SELECT * FROM convoy.jobs WHERE deleted_at IS NULL` - - baseJobsFilter = ` - AND project_id = :project_id` - - baseFetchJobsPagedForward = ` - %s - %s - AND id <= :cursor - GROUP BY id - ORDER BY id DESC - LIMIT :limit - ` - - baseFetchJobsPagedBackward = ` - WITH jobs AS ( - %s - %s - AND id >= :cursor - GROUP BY id - ORDER BY id ASC - LIMIT :limit - ) - - SELECT * FROM jobs ORDER BY id DESC - ` - - countPrevJobs = ` - SELECT COUNT(DISTINCT(id)) AS count - FROM convoy.jobs - WHERE deleted_at IS NULL - %s - AND id > :cursor GROUP BY id ORDER BY id DESC LIMIT 1` -) - -type jobRepo struct { - db database.Database -} - -func NewJobRepo(db database.Database) datastore.JobRepository { - return &jobRepo{db: db} -} - -func (j *jobRepo) CreateJob(ctx context.Context, job *datastore.Job) error { - r, err := j.db.GetDB().ExecContext(ctx, createJob, - job.UID, - job.Type, - job.Status, - job.ProjectID, - ) - if err != nil { - return err - } - - rowsAffected, err := r.RowsAffected() - if err != nil { - return err - } - - if rowsAffected < 1 { - return ErrJobNotCreated - } - - return nil -} - -func (j *jobRepo) MarkJobAsStarted(ctx context.Context, uid, projectID string) error { - r, err := j.db.GetDB().ExecContext(ctx, updateJobStartedAt, uid, projectID) - if err != nil { - return err - } 
- - rowsAffected, err := r.RowsAffected() - if err != nil { - return err - } - - if rowsAffected < 1 { - return ErrJobNotUpdated - } - - return nil -} - -func (j *jobRepo) MarkJobAsCompleted(ctx context.Context, uid, projectID string) error { - r, err := j.db.GetDB().ExecContext(ctx, updateJobCompletedAt, uid, projectID) - if err != nil { - return err - } - - rowsAffected, err := r.RowsAffected() - if err != nil { - return err - } - - if rowsAffected < 1 { - return ErrJobNotUpdated - } - - return nil -} - -func (j *jobRepo) MarkJobAsFailed(ctx context.Context, uid, projectID string) error { - r, err := j.db.GetDB().ExecContext(ctx, updateJobFailedAt, uid, projectID) - if err != nil { - return err - } - - rowsAffected, err := r.RowsAffected() - if err != nil { - return err - } - - if rowsAffected < 1 { - return ErrJobNotUpdated - } - - return nil -} - -func (j *jobRepo) DeleteJob(ctx context.Context, uid, projectID string) error { - r, err := j.db.GetDB().ExecContext(ctx, deleteJob, uid, projectID) - if err != nil { - return err - } - - rowsAffected, err := r.RowsAffected() - if err != nil { - return err - } - - if rowsAffected < 1 { - return ErrJobNotDeleted - } - - return nil -} - -func (j *jobRepo) FetchJobById(ctx context.Context, uid, projectID string) (*datastore.Job, error) { - job := &datastore.Job{} - err := j.db.GetDB().QueryRowxContext(ctx, fetchJobById, uid, projectID).StructScan(job) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, ErrJobNotFound - } - return nil, err - } - - return job, nil -} - -func (j *jobRepo) FetchRunningJobsByProjectId(ctx context.Context, projectID string) ([]datastore.Job, error) { - var jobs []datastore.Job - rows, err := j.db.GetReadDB().QueryxContext(ctx, fetchRunningJobsByProjectId, projectID) - if err != nil { - return nil, err - } - defer closeWithError(rows) - - for rows.Next() { - var job datastore.Job - - err = rows.StructScan(&job) - if err != nil { - return nil, err - } - - jobs = append(jobs, 
job) - } - - return jobs, nil -} - -func (j *jobRepo) FetchJobsByProjectId(ctx context.Context, projectID string) ([]datastore.Job, error) { - var jobs []datastore.Job - rows, err := j.db.GetReadDB().QueryxContext(ctx, fetchJobsByProjectId, projectID) - if err != nil { - return nil, err - } - defer closeWithError(rows) - - for rows.Next() { - var job datastore.Job - - err = rows.StructScan(&job) - if err != nil { - return nil, err - } - - jobs = append(jobs, job) - } - - return jobs, nil -} - -func (j *jobRepo) LoadJobsPaged(ctx context.Context, projectID string, pageable datastore.Pageable) ([]datastore.Job, datastore.PaginationData, error) { - var query, filterQuery string - var args []interface{} - var err error - - arg := map[string]interface{}{ - "project_id": projectID, - "limit": pageable.Limit(), - "cursor": pageable.Cursor(), - } - - if pageable.Direction == datastore.Next { - query = baseFetchJobsPagedForward - } else { - query = baseFetchJobsPagedBackward - } - - filterQuery = baseJobsFilter - - query = fmt.Sprintf(query, fetchJobsPaginated, filterQuery) - - query, args, err = sqlx.Named(query, arg) - if err != nil { - return nil, datastore.PaginationData{}, err - } - - query, args, err = sqlx.In(query, args...) - if err != nil { - return nil, datastore.PaginationData{}, err - } - - query = j.db.GetReadDB().Rebind(query) - - rows, err := j.db.GetReadDB().QueryxContext(ctx, query, args...) 
- if err != nil { - return nil, datastore.PaginationData{}, err - } - defer closeWithError(rows) - - var jobs []datastore.Job - for rows.Next() { - var data JobPaginated - - err = rows.StructScan(&data) - if err != nil { - return nil, datastore.PaginationData{}, err - } - - jobs = append(jobs, data.Job) - } - - var count datastore.PrevRowCount - if len(jobs) > 0 { - var countQuery string - var qargs []interface{} - first := jobs[0] - qarg := arg - qarg["cursor"] = first.UID - - cq := fmt.Sprintf(countPrevJobs, filterQuery) - countQuery, qargs, err = sqlx.Named(cq, qarg) - if err != nil { - return nil, datastore.PaginationData{}, err - } - - countQuery = j.db.GetReadDB().Rebind(countQuery) - - // count the row number before the first row - rows, err := j.db.GetReadDB().QueryxContext(ctx, countQuery, qargs...) - if err != nil { - return nil, datastore.PaginationData{}, err - } - defer closeWithError(rows) - - if rows.Next() { - err = rows.StructScan(&count) - if err != nil { - return nil, datastore.PaginationData{}, err - } - } - } - - ids := make([]string, len(jobs)) - for i := range jobs { - ids[i] = jobs[i].UID - } - - if len(jobs) > pageable.PerPage { - jobs = jobs[:len(jobs)-1] - } - - pagination := &datastore.PaginationData{PrevRowCount: count} - pagination = pagination.Build(pageable, ids) - - return jobs, *pagination, nil -} - -type JobPaginated struct { - Count int - datastore.Job -} diff --git a/database/postgres/job_test.go b/database/postgres/job_test.go deleted file mode 100644 index 8a55f844ce..0000000000 --- a/database/postgres/job_test.go +++ /dev/null @@ -1,345 +0,0 @@ -//go:build integration - -package postgres - -import ( - "context" - "testing" - "time" - - "github.com/oklog/ulid/v2" - "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v4" - - "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/datastore" -) - -func Test_CreateJob(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepo := NewJobRepo(db) 
- job := generateJob(t, db) - - require.NoError(t, jobRepo.CreateJob(context.Background(), job)) - - jobById, err := jobRepo.FetchJobById(context.Background(), job.UID, job.ProjectID) - require.NoError(t, err) - - require.NotNil(t, jobById) - require.Equal(t, datastore.JobStatusReady, jobById.Status) -} - -func TestJobRepo_FetchJobsByProjectId(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - org := seedOrg(t, db) - jobRepo := NewJobRepo(db) - - p1 := &datastore.Project{ - UID: ulid.Make().String(), - Name: "P1", - OrganisationID: org.UID, - Type: datastore.IncomingProject, - Config: &datastore.DefaultProjectConfig, - } - - p2 := &datastore.Project{ - UID: ulid.Make().String(), - Name: "P2", - OrganisationID: org.UID, - Type: datastore.IncomingProject, - Config: &datastore.DefaultProjectConfig, - } - - err := NewProjectRepo(db).CreateProject(context.Background(), p1) - require.NoError(t, err) - - err = NewProjectRepo(db).CreateProject(context.Background(), p2) - require.NoError(t, err) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "create", - Status: datastore.JobStatusRunning, - StartedAt: null.TimeFrom(time.Now()), - ProjectID: p1.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "update", - Status: datastore.JobStatusCompleted, - StartedAt: null.TimeFrom(time.Now()), - CompletedAt: null.TimeFrom(time.Now()), - ProjectID: p2.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "update", - Status: datastore.JobStatusFailed, - StartedAt: null.TimeFrom(time.Now()), - FailedAt: null.TimeFrom(time.Now()), - ProjectID: p2.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - jobs, err := 
jobRepo.FetchJobsByProjectId(context.Background(), p2.UID) - require.NoError(t, err) - - require.Equal(t, 2, len(jobs)) -} - -func TestJobRepo_FetchRunningJobsByProjectId(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - org := seedOrg(t, db) - jobRepo := NewJobRepo(db) - - p1 := &datastore.Project{ - UID: ulid.Make().String(), - Name: "P1", - OrganisationID: org.UID, - Type: datastore.IncomingProject, - Config: &datastore.DefaultProjectConfig, - } - - p2 := &datastore.Project{ - UID: ulid.Make().String(), - Name: "P2", - OrganisationID: org.UID, - Type: datastore.IncomingProject, - Config: &datastore.DefaultProjectConfig, - } - - err := NewProjectRepo(db).CreateProject(context.Background(), p1) - require.NoError(t, err) - - err = NewProjectRepo(db).CreateProject(context.Background(), p2) - require.NoError(t, err) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "create", - Status: datastore.JobStatusRunning, - StartedAt: null.TimeFrom(time.Now()), - ProjectID: p1.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "update", - Status: datastore.JobStatusRunning, - StartedAt: null.TimeFrom(time.Now()), - ProjectID: p2.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - require.NoError(t, jobRepo.CreateJob(context.Background(), &datastore.Job{ - UID: ulid.Make().String(), - Type: "update", - Status: datastore.JobStatusFailed, - StartedAt: null.TimeFrom(time.Now()), - FailedAt: null.TimeFrom(time.Now()), - ProjectID: p2.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - })) - - jobs, err := jobRepo.FetchRunningJobsByProjectId(context.Background(), p2.UID) - require.NoError(t, err) - - require.Equal(t, 1, len(jobs)) -} - -func TestJobRepo_MarkJobAsStarted(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepo := NewJobRepo(db) - 
job := generateJob(t, db) - - ctx := context.Background() - - require.NoError(t, jobRepo.CreateJob(ctx, job)) - - require.NoError(t, jobRepo.MarkJobAsStarted(ctx, job.UID, job.ProjectID)) - - jobById, err := jobRepo.FetchJobById(ctx, job.UID, job.ProjectID) - require.NoError(t, err) - - require.Equal(t, datastore.JobStatusRunning, jobById.Status) - require.Less(t, time.Time{}.Unix(), jobById.StartedAt.Time.Unix()) - require.True(t, time.Now().After(jobById.StartedAt.Time)) - require.Equal(t, time.Time{}.Unix(), jobById.FailedAt.Time.Unix()) - require.Equal(t, time.Time{}.Unix(), jobById.CompletedAt.Time.Unix()) -} - -func TestJobRepo_MarkJobAsCompleted(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepo := NewJobRepo(db) - job := generateJob(t, db) - - ctx := context.Background() - - require.NoError(t, jobRepo.CreateJob(ctx, job)) - - require.NoError(t, jobRepo.MarkJobAsStarted(ctx, job.UID, job.ProjectID)) - require.NoError(t, jobRepo.MarkJobAsCompleted(ctx, job.UID, job.ProjectID)) - - jobById, err := jobRepo.FetchJobById(ctx, job.UID, job.ProjectID) - require.NoError(t, err) - - require.Equal(t, datastore.JobStatusCompleted, jobById.Status) - require.Less(t, time.Time{}.Unix(), jobById.StartedAt.Time.Unix()) - require.True(t, time.Now().After(jobById.StartedAt.Time)) - require.Equal(t, time.Time{}.Unix(), jobById.FailedAt.Time.Unix()) -} - -func TestJobRepo_MarkJobAsFailed(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepo := NewJobRepo(db) - job := generateJob(t, db) - - ctx := context.Background() - - require.NoError(t, jobRepo.CreateJob(ctx, job)) - - require.NoError(t, jobRepo.MarkJobAsStarted(ctx, job.UID, job.ProjectID)) - require.NoError(t, jobRepo.MarkJobAsFailed(ctx, job.UID, job.ProjectID)) - - jobById, err := jobRepo.FetchJobById(ctx, job.UID, job.ProjectID) - require.NoError(t, err) - - require.Equal(t, datastore.JobStatusFailed, jobById.Status) - require.Less(t, time.Time{}.Unix(), 
jobById.StartedAt.Time.Unix()) - require.True(t, time.Now().After(jobById.StartedAt.Time)) - require.Equal(t, time.Time{}.Unix(), jobById.CompletedAt.Time.Unix()) -} - -func TestJobRepo_DeleteJob(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepo := NewJobRepo(db) - job := generateJob(t, db) - - require.NoError(t, jobRepo.CreateJob(context.Background(), job)) - - err := jobRepo.DeleteJob(context.Background(), job.UID, job.ProjectID) - require.NoError(t, err) - - _, err = jobRepo.FetchJobById(context.Background(), job.UID, job.ProjectID) - require.Equal(t, ErrJobNotFound, err) -} - -func Test_LoadJobsPaged(t *testing.T) { - type Expected struct { - paginationData datastore.PaginationData - } - - tests := []struct { - name string - pageData datastore.Pageable - count int - expected Expected - }{ - { - name: "Load Jobs Paged - 10 records", - pageData: datastore.Pageable{PerPage: 3}, - count: 10, - expected: Expected{ - paginationData: datastore.PaginationData{ - PerPage: 3, - }, - }, - }, - - { - name: "Load Jobs Paged - 12 records", - pageData: datastore.Pageable{PerPage: 4}, - count: 12, - expected: Expected{ - paginationData: datastore.PaginationData{ - PerPage: 4, - }, - }, - }, - - { - name: "Load Jobs Paged - 5 records", - pageData: datastore.Pageable{PerPage: 3}, - count: 5, - expected: Expected{ - paginationData: datastore.PaginationData{ - PerPage: 3, - }, - }, - }, - - { - name: "Load Jobs Paged - 1 record", - pageData: datastore.Pageable{PerPage: 3}, - count: 1, - expected: Expected{ - paginationData: datastore.PaginationData{ - PerPage: 3, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - db, closeFn := getDB(t) - defer closeFn() - - jobRepository := NewJobRepo(db) - project := seedProject(t, db) - - for i := 0; i < tc.count; i++ { - job := &datastore.Job{ - UID: ulid.Make().String(), - ProjectID: project.UID, - Status: datastore.JobStatusReady, - } - - require.NoError(t, 
jobRepository.CreateJob(context.Background(), job)) - } - - _, pageable, err := jobRepository.LoadJobsPaged(context.Background(), project.UID, tc.pageData) - require.NoError(t, err) - - require.Equal(t, tc.expected.paginationData.PerPage, pageable.PerPage) - }) - } -} - -func generateJob(t *testing.T, db database.Database) *datastore.Job { - project := seedProject(t, db) - - return &datastore.Job{ - UID: ulid.Make().String(), - Type: "search_tokenizer", - Status: datastore.JobStatusReady, - ProjectID: project.UID, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - } -} diff --git a/datastore/models.go b/datastore/models.go index 5d2f8acebe..74d485ade3 100644 --- a/datastore/models.go +++ b/datastore/models.go @@ -762,6 +762,10 @@ var ( ErrNoActiveSecret = errors.New("no active secret found") ErrSecretNotFound = errors.New("secret not found") ErrMetaEventNotFound = errors.New("meta event not found") + ErrJobNotFound = errors.New("job not found") + ErrFeatureFlagNotFound = errors.New("feature flag not found") + ErrFeatureFlagOverrideNotFound = errors.New("feature flag override not found") + ErrEarlyAdopterFeatureNotFound = errors.New("early adopter feature not found") ) type AppMetadata struct { diff --git a/docs/jobs-and-feature-flags-sqlc-migration.md b/docs/jobs-and-feature-flags-sqlc-migration.md new file mode 100644 index 0000000000..736f12426f --- /dev/null +++ b/docs/jobs-and-feature-flags-sqlc-migration.md @@ -0,0 +1,44 @@ +# Jobs & Feature Flags: SQLc Migration Progress + +## Overview +Migration of the last remaining `database/postgres/` repository implementations to sqlc-based modules under `internal/`. + +## Modules Migrated + +### 1. 
Jobs (`internal/jobs/`) +- **Interface**: `datastore.JobRepository` (9 methods) +- **Old implementation**: `database/postgres/job.go` (deleted) +- **New files**: `internal/jobs/impl.go`, `internal/jobs/queries.sql`, `internal/jobs/repo/` (generated) +- **Call site**: `internal/dataplane/worker.go` updated from `postgres.NewJobRepo()` to `jobs.New()` + +### 2. Feature Flags (`internal/feature_flags/`) +- **Interfaces**: `fflag.FeatureFlagFetcher`, `fflag.EarlyAdopterFeatureFetcher` +- **Old implementation**: `database/postgres/feature_flag.go`, `feature_flag_fetcher.go`, `early_adopter_feature_fetcher.go` (all deleted) +- **New files**: `internal/feature_flags/impl.go`, `internal/feature_flags/queries.sql`, `internal/feature_flags/repo/` (generated) +- **Tables**: `convoy.feature_flags`, `convoy.feature_flag_overrides`, `convoy.early_adopter_features` +- **Call sites updated**: + - `cmd/server/server.go` + - `internal/dataplane/worker.go` + - `api/handlers/organisation.go` + - `cmd/utils/org_feature_flags.go` + - `api/server_suite_test.go` + - `api/oss_login_integration_test.go` + - `api/oauth2_integration_test.go` + - `e2e/oauth2_e2e_test.go` + +## Files Deleted +- `database/postgres/job.go` +- `database/postgres/job_test.go` +- `database/postgres/feature_flag.go` +- `database/postgres/feature_flag_fetcher.go` +- `database/postgres/early_adopter_feature_fetcher.go` + +## Remaining in `database/postgres/` +Only infrastructure files: +- `postgres.go` - DB connection pool management +- `postgres_collector.go` - Prometheus metrics +- `postgres_test.go` - Infrastructure tests +- `pkg_logger.go` - Package logger + +## Status: Complete +All repository implementations have been migrated to sqlc. 
diff --git a/e2e/oauth2_e2e_test.go b/e2e/oauth2_e2e_test.go index 34a7fe16f7..1be8a42da7 100644 --- a/e2e/oauth2_e2e_test.go +++ b/e2e/oauth2_e2e_test.go @@ -24,9 +24,10 @@ import ( "github.com/frain-dev/convoy/api/models" "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/fflag" + log "github.com/frain-dev/convoy/pkg/logger" "github.com/frain-dev/convoy/util" ) @@ -436,6 +437,8 @@ func generateTestJWK(t *testing.T) *datastore.OAuth2SigningKey { func enableOAuth2FeatureFlag(t *testing.T, db database.Database, orgID string) error { t.Helper() + ffService := feature_flags.New(log.New("convoy", log.LevelError), db) + feature := &datastore.EarlyAdopterFeature{ OrganisationID: orgID, FeatureKey: string(fflag.OAuthTokenExchange), @@ -443,5 +446,5 @@ func enableOAuth2FeatureFlag(t *testing.T, db database.Database, orgID string) e EnabledAt: null.TimeFrom(time.Now()), } - return postgres.UpsertEarlyAdopterFeature(context.Background(), db, feature) + return ffService.UpsertEarlyAdopterFeature(context.Background(), feature) } diff --git a/internal/dataplane/worker.go b/internal/dataplane/worker.go index cb11729920..a76d84ed46 100644 --- a/internal/dataplane/worker.go +++ b/internal/dataplane/worker.go @@ -12,7 +12,6 @@ import ( "github.com/frain-dev/convoy" "github.com/frain-dev/convoy/config" - "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/datastore" batch_retries "github.com/frain-dev/convoy/internal/batch_retries" "github.com/frain-dev/convoy/internal/configuration" @@ -20,7 +19,9 @@ import ( "github.com/frain-dev/convoy/internal/endpoints" "github.com/frain-dev/convoy/internal/event_deliveries" "github.com/frain-dev/convoy/internal/events" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/filters" + 
"github.com/frain-dev/convoy/internal/jobs" "github.com/frain-dev/convoy/internal/meta_events" "github.com/frain-dev/convoy/internal/organisations" "github.com/frain-dev/convoy/internal/pkg/billing" @@ -116,13 +117,14 @@ func NewWorker(ctx context.Context, opts RuntimeOpts, cfg config.Configuration) metaEventRepo := meta_events.New(opts.Logger, opts.DB) endpointRepo := endpoints.New(opts.Logger, opts.DB) eventRepo := events.New(opts.Logger, opts.DB) - jobRepo := postgres.NewJobRepo(opts.DB) + jobRepo := jobs.New(opts.Logger, opts.DB) eventDeliveryRepo := event_deliveries.New(opts.Logger, opts.DB) subRepo := subscriptions.New(opts.Logger, opts.DB) configRepo := configuration.New(opts.Logger, opts.DB) attemptRepo := delivery_attempts.New(opts.Logger, opts.DB) filterRepo := filters.New(opts.Logger, opts.DB) batchRetryRepo := batch_retries.New(lo, opts.DB) + ffService := feature_flags.New(opts.Logger, opts.DB) rd, err := rdb.NewClientFromRedisConfig(cfg.Redis) if err != nil { @@ -304,8 +306,8 @@ func NewWorker(ctx context.Context, opts RuntimeOpts, cfg config.Configuration) AttemptsRepo: attemptRepo, CircuitBreakerManager: circuitBreakerManager, FeatureFlag: featureFlag, - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(opts.DB), - EarlyAdopterFeatureFetcher: postgres.NewEarlyAdopterFeatureFetcher(opts.DB), + FeatureFlagFetcher: ffService, + EarlyAdopterFeatureFetcher: ffService, TracerBackend: opts.TracerBackend, OAuth2TokenService: oauth2TokenService, Logger: lo, @@ -324,7 +326,7 @@ func NewWorker(ctx context.Context, opts RuntimeOpts, cfg config.Configuration) TracerBackend: opts.TracerBackend, OAuth2TokenService: oauth2TokenService, FeatureFlag: featureFlag, - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(opts.DB), + FeatureFlagFetcher: ffService, Logger: lo, } @@ -351,8 +353,8 @@ func NewWorker(ctx context.Context, opts RuntimeOpts, cfg config.Configuration) TracerBackend: opts.TracerBackend, OAuth2TokenService: oauth2TokenService, FeatureFlag: 
featureFlag, - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(opts.DB), - EarlyAdopterFeatureFetcher: postgres.NewEarlyAdopterFeatureFetcher(opts.DB), + FeatureFlagFetcher: ffService, + EarlyAdopterFeatureFetcher: ffService, Logger: lo, } consumer.RegisterHandlers(convoy.MatchEventSubscriptionsProcessor, task.MatchSubscriptionsAndCreateEventDeliveries(matchSubscriptionsDeps), newTelemetry) @@ -382,8 +384,8 @@ func NewWorker(ctx context.Context, opts RuntimeOpts, cfg config.Configuration) ProjectRepo: projectRepo, Licenser: opts.Licenser, FeatureFlag: featureFlag, - FeatureFlagFetcher: postgres.NewFeatureFlagFetcher(opts.DB), - EarlyAdopterFeatureFetcher: postgres.NewEarlyAdopterFeatureFetcher(opts.DB), + FeatureFlagFetcher: ffService, + EarlyAdopterFeatureFetcher: ffService, Logger: lo, } consumer.RegisterHandlers(convoy.BulkOnboardProcessor, task.ProcessBulkOnboard(bulkOnboardDeps), newTelemetry) diff --git a/internal/feature_flags/impl.go b/internal/feature_flags/impl.go new file mode 100644 index 0000000000..709637bccc --- /dev/null +++ b/internal/feature_flags/impl.go @@ -0,0 +1,428 @@ +package feature_flags + +import ( + "context" + "errors" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/oklog/ulid/v2" + + "github.com/frain-dev/convoy/database" + "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/common" + "github.com/frain-dev/convoy/internal/feature_flags/repo" + fflag "github.com/frain-dev/convoy/internal/pkg/fflag" + log "github.com/frain-dev/convoy/pkg/logger" +) + +// Service implements feature flag operations using sqlc-generated queries. +// It satisfies both fflag.FeatureFlagFetcher and fflag.EarlyAdopterFeatureFetcher. 
+type Service struct { + logger log.Logger + repo repo.Querier + db *pgxpool.Pool +} + +// Compile-time interface checks +var _ fflag.FeatureFlagFetcher = (*Service)(nil) +var _ fflag.EarlyAdopterFeatureFetcher = (*Service)(nil) + +// New creates a new feature flags Service. +func New(logger log.Logger, db database.Database) *Service { + return &Service{ + logger: logger, + repo: repo.New(db.GetConn()), + db: db.GetConn(), + } +} + +// ============================================================================ +// Row conversion helpers +// ============================================================================ + +func rowToFeatureFlag(row repo.FetchFeatureFlagByKeyRow) datastore.FeatureFlag { + return datastore.FeatureFlag{ + UID: row.ID, + FeatureKey: row.FeatureKey, + Enabled: row.Enabled, + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func rowToFeatureFlagByID(row repo.FetchFeatureFlagByIDRow) datastore.FeatureFlag { + return datastore.FeatureFlag{ + UID: row.ID, + FeatureKey: row.FeatureKey, + Enabled: row.Enabled, + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func loadRowToFeatureFlag(row repo.LoadFeatureFlagsRow) datastore.FeatureFlag { + return datastore.FeatureFlag{ + UID: row.ID, + FeatureKey: row.FeatureKey, + Enabled: row.Enabled, + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func rowToFeatureFlagOverride(row repo.FetchFeatureFlagOverrideRow) datastore.FeatureFlagOverride { + return datastore.FeatureFlagOverride{ + UID: row.ID, + FeatureFlagID: row.FeatureFlagID, + OwnerType: row.OwnerType, + OwnerID: row.OwnerID, + Enabled: row.Enabled, + EnabledAt: common.PgTimestamptzToNullTime(row.EnabledAt), + EnabledBy: common.PgTextToNullString(row.EnabledBy), + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: 
common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func loadRowToFeatureFlagOverrideByOwner(row repo.LoadFeatureFlagOverridesByOwnerRow) datastore.FeatureFlagOverride { + return datastore.FeatureFlagOverride{ + UID: row.ID, + FeatureFlagID: row.FeatureFlagID, + OwnerType: row.OwnerType, + OwnerID: row.OwnerID, + Enabled: row.Enabled, + EnabledAt: common.PgTimestamptzToNullTime(row.EnabledAt), + EnabledBy: common.PgTextToNullString(row.EnabledBy), + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func loadRowToFeatureFlagOverrideByFF(row repo.LoadFeatureFlagOverridesByFeatureFlagRow) datastore.FeatureFlagOverride { + return datastore.FeatureFlagOverride{ + UID: row.ID, + FeatureFlagID: row.FeatureFlagID, + OwnerType: row.OwnerType, + OwnerID: row.OwnerID, + Enabled: row.Enabled, + EnabledAt: common.PgTimestamptzToNullTime(row.EnabledAt), + EnabledBy: common.PgTextToNullString(row.EnabledBy), + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func rowToEarlyAdopterFeature(row repo.FetchEarlyAdopterFeatureRow) datastore.EarlyAdopterFeature { + return datastore.EarlyAdopterFeature{ + UID: row.ID, + OrganisationID: row.OrganisationID, + FeatureKey: row.FeatureKey, + Enabled: row.Enabled, + EnabledBy: common.PgTextToNullString(row.EnabledBy), + EnabledAt: common.PgTimestamptzToNullTime(row.EnabledAt), + CreatedAt: common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +func loadRowToEarlyAdopterFeature(row repo.LoadEarlyAdopterFeaturesByOrgRow) datastore.EarlyAdopterFeature { + return datastore.EarlyAdopterFeature{ + UID: row.ID, + OrganisationID: row.OrganisationID, + FeatureKey: row.FeatureKey, + Enabled: row.Enabled, + EnabledBy: common.PgTextToNullString(row.EnabledBy), + EnabledAt: common.PgTimestamptzToNullTime(row.EnabledAt), + CreatedAt: 
common.PgTimestamptzToTime(row.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(row.UpdatedAt), + } +} + +// ============================================================================ +// CRUD Methods — Feature Flags +// ============================================================================ + +// FetchFeatureFlagByKey fetches a feature flag by its key. +func (s *Service) FetchFeatureFlagByKey(ctx context.Context, key string) (*datastore.FeatureFlag, error) { + row, err := s.repo.FetchFeatureFlagByKey(ctx, common.StringToPgText(key)) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, datastore.ErrFeatureFlagNotFound + } + s.logger.Error("failed to fetch feature flag by key", "error", err) + return nil, err + } + + flag := rowToFeatureFlag(row) + return &flag, nil +} + +// FetchFeatureFlagByID fetches a feature flag by its ID. +func (s *Service) FetchFeatureFlagByID(ctx context.Context, id string) (*datastore.FeatureFlag, error) { + row, err := s.repo.FetchFeatureFlagByID(ctx, common.StringToPgText(id)) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, datastore.ErrFeatureFlagNotFound + } + s.logger.Error("failed to fetch feature flag by id", "error", err) + return nil, err + } + + flag := rowToFeatureFlagByID(row) + return &flag, nil +} + +// LoadFeatureFlags fetches all feature flags ordered by key. +func (s *Service) LoadFeatureFlags(ctx context.Context) ([]datastore.FeatureFlag, error) { + rows, err := s.repo.LoadFeatureFlags(ctx) + if err != nil { + s.logger.Error("failed to load feature flags", "error", err) + return nil, err + } + + flags := make([]datastore.FeatureFlag, 0, len(rows)) + for _, row := range rows { + flags = append(flags, loadRowToFeatureFlag(row)) + } + return flags, nil +} + +// UpdateFeatureFlag updates the enabled state of a feature flag. 
+func (s *Service) UpdateFeatureFlag(ctx context.Context, featureFlagID string, enabled bool) error { + err := s.repo.UpdateFeatureFlag(ctx, repo.UpdateFeatureFlagParams{ + ID: common.StringToPgText(featureFlagID), + Enabled: enabled, + }) + if err != nil { + s.logger.Error("failed to update feature flag", "error", err) + return err + } + return nil +} + +// ============================================================================ +// CRUD Methods — Feature Flag Overrides +// ============================================================================ + +// FetchFeatureFlagOverrideByOwner fetches a feature flag override for a specific owner. +func (s *Service) FetchFeatureFlagOverrideByOwner(ctx context.Context, ownerType, ownerID, featureFlagID string) (*datastore.FeatureFlagOverride, error) { + row, err := s.repo.FetchFeatureFlagOverride(ctx, repo.FetchFeatureFlagOverrideParams{ + OwnerType: common.StringToPgText(ownerType), + OwnerID: common.StringToPgText(ownerID), + FeatureFlagID: common.StringToPgText(featureFlagID), + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, datastore.ErrFeatureFlagOverrideNotFound + } + s.logger.Error("failed to fetch feature flag override", "error", err) + return nil, err + } + + override := rowToFeatureFlagOverride(row) + return &override, nil +} + +// LoadFeatureFlagOverridesByOwner fetches all overrides for a specific owner. 
+func (s *Service) LoadFeatureFlagOverridesByOwner(ctx context.Context, ownerType, ownerID string) ([]datastore.FeatureFlagOverride, error) { + rows, err := s.repo.LoadFeatureFlagOverridesByOwner(ctx, repo.LoadFeatureFlagOverridesByOwnerParams{ + OwnerType: common.StringToPgText(ownerType), + OwnerID: common.StringToPgText(ownerID), + }) + if err != nil { + s.logger.Error("failed to load feature flag overrides by owner", "error", err) + return nil, err + } + + overrides := make([]datastore.FeatureFlagOverride, 0, len(rows)) + for _, row := range rows { + overrides = append(overrides, loadRowToFeatureFlagOverrideByOwner(row)) + } + return overrides, nil +} + +// LoadFeatureFlagOverridesByFeatureFlag fetches all overrides for a specific feature flag. +func (s *Service) LoadFeatureFlagOverridesByFeatureFlag(ctx context.Context, featureFlagID string) ([]datastore.FeatureFlagOverride, error) { + rows, err := s.repo.LoadFeatureFlagOverridesByFeatureFlag(ctx, common.StringToPgText(featureFlagID)) + if err != nil { + s.logger.Error("failed to load feature flag overrides by feature flag", "error", err) + return nil, err + } + + overrides := make([]datastore.FeatureFlagOverride, 0, len(rows)) + for _, row := range rows { + overrides = append(overrides, loadRowToFeatureFlagOverrideByFF(row)) + } + return overrides, nil +} + +// UpsertFeatureFlagOverride creates or updates a feature flag override. 
+func (s *Service) UpsertFeatureFlagOverride(ctx context.Context, override *datastore.FeatureFlagOverride) error { + if override.UID == "" { + override.UID = ulid.Make().String() + } + + // Handle nullable enabledAt + enabledAt := common.NullTimeToPgTimestamptz(override.EnabledAt) + if !override.EnabledAt.Valid && override.Enabled { + enabledAt = common.TimeToPgTimestamptz(time.Now()) + } + + err := s.repo.UpsertFeatureFlagOverride(ctx, repo.UpsertFeatureFlagOverrideParams{ + ID: common.StringToPgText(override.UID), + FeatureFlagID: common.StringToPgText(override.FeatureFlagID), + OwnerType: common.StringToPgText(override.OwnerType), + OwnerID: common.StringToPgText(override.OwnerID), + Enabled: override.Enabled, + EnabledAt: enabledAt, + EnabledBy: common.NullStringToPgText(override.EnabledBy), + }) + if err != nil { + s.logger.Error("failed to upsert feature flag override", "error", err) + return err + } + return nil +} + +// DeleteFeatureFlagOverride deletes a feature flag override. +func (s *Service) DeleteFeatureFlagOverride(ctx context.Context, ownerType, ownerID, featureFlagID string) error { + err := s.repo.DeleteFeatureFlagOverride(ctx, repo.DeleteFeatureFlagOverrideParams{ + OwnerType: common.StringToPgText(ownerType), + OwnerID: common.StringToPgText(ownerID), + FeatureFlagID: common.StringToPgText(featureFlagID), + }) + if err != nil { + s.logger.Error("failed to delete feature flag override", "error", err) + return err + } + return nil +} + +// ============================================================================ +// CRUD Methods — Early Adopter Features +// ============================================================================ + +// GetEarlyAdopterFeature fetches an early adopter feature for an organisation. +// Named GetEarlyAdopterFeature to avoid conflict with the fflag.EarlyAdopterFeatureFetcher +// interface method FetchEarlyAdopterFeature. 
+func (s *Service) GetEarlyAdopterFeature(ctx context.Context, orgID, featureKey string) (*datastore.EarlyAdopterFeature, error) { + row, err := s.repo.FetchEarlyAdopterFeature(ctx, repo.FetchEarlyAdopterFeatureParams{ + OrganisationID: common.StringToPgText(orgID), + FeatureKey: common.StringToPgText(featureKey), + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, datastore.ErrEarlyAdopterFeatureNotFound + } + s.logger.Error("failed to fetch early adopter feature", "error", err) + return nil, err + } + + feature := rowToEarlyAdopterFeature(row) + return &feature, nil +} + +// LoadEarlyAdopterFeaturesByOrg fetches all early adopter features for an organisation. +func (s *Service) LoadEarlyAdopterFeaturesByOrg(ctx context.Context, orgID string) ([]datastore.EarlyAdopterFeature, error) { + rows, err := s.repo.LoadEarlyAdopterFeaturesByOrg(ctx, common.StringToPgText(orgID)) + if err != nil { + s.logger.Error("failed to load early adopter features by org", "error", err) + return nil, err + } + + features := make([]datastore.EarlyAdopterFeature, 0, len(rows)) + for _, row := range rows { + features = append(features, loadRowToEarlyAdopterFeature(row)) + } + return features, nil +} + +// UpsertEarlyAdopterFeature creates or updates an early adopter feature. 
+func (s *Service) UpsertEarlyAdopterFeature(ctx context.Context, feature *datastore.EarlyAdopterFeature) error { + if feature.UID == "" { + feature.UID = ulid.Make().String() + } + + // Handle nullable enabledAt + enabledAt := common.NullTimeToPgTimestamptz(feature.EnabledAt) + if !feature.EnabledAt.Valid && feature.Enabled { + enabledAt = common.TimeToPgTimestamptz(time.Now()) + } + + err := s.repo.UpsertEarlyAdopterFeature(ctx, repo.UpsertEarlyAdopterFeatureParams{ + ID: common.StringToPgText(feature.UID), + OrganisationID: common.StringToPgText(feature.OrganisationID), + FeatureKey: common.StringToPgText(feature.FeatureKey), + Enabled: feature.Enabled, + EnabledBy: common.NullStringToPgText(feature.EnabledBy), + EnabledAt: enabledAt, + }) + if err != nil { + s.logger.Error("failed to upsert early adopter feature", "error", err) + return err + } + return nil +} + +// DeleteEarlyAdopterFeature deletes an early adopter feature. +func (s *Service) DeleteEarlyAdopterFeature(ctx context.Context, orgID, featureKey string) error { + err := s.repo.DeleteEarlyAdopterFeature(ctx, repo.DeleteEarlyAdopterFeatureParams{ + OrganisationID: common.StringToPgText(orgID), + FeatureKey: common.StringToPgText(featureKey), + }) + if err != nil { + s.logger.Error("failed to delete early adopter feature", "error", err) + return err + } + return nil +} + +// ============================================================================ +// Interface Methods — fflag.FeatureFlagFetcher +// ============================================================================ + +// FetchFeatureFlag implements fflag.FeatureFlagFetcher. +// It fetches a feature flag by key and returns the info needed by the fflag package. 
+func (s *Service) FetchFeatureFlag(ctx context.Context, key string) (*fflag.FeatureFlagInfo, error) { + flag, err := s.FetchFeatureFlagByKey(ctx, key) + if err != nil { + return nil, err + } + + return &fflag.FeatureFlagInfo{ + UID: flag.UID, + Enabled: flag.Enabled, + }, nil +} + +// FetchFeatureFlagOverride implements fflag.FeatureFlagFetcher. +// It fetches a feature flag override and returns the info needed by the fflag package. +func (s *Service) FetchFeatureFlagOverride(ctx context.Context, ownerType, ownerID, featureFlagID string) (*fflag.FeatureFlagOverrideInfo, error) { + override, err := s.FetchFeatureFlagOverrideByOwner(ctx, ownerType, ownerID, featureFlagID) + if err != nil { + return nil, err + } + + return &fflag.FeatureFlagOverrideInfo{ + Enabled: override.Enabled, + }, nil +} + +// ============================================================================ +// Interface Methods — fflag.EarlyAdopterFeatureFetcher +// ============================================================================ + +// FetchEarlyAdopterFeature implements fflag.EarlyAdopterFeatureFetcher. +// It fetches an early adopter feature and returns the info needed by the fflag package. 
+func (s *Service) FetchEarlyAdopterFeature(ctx context.Context, orgID, featureKey string) (*fflag.EarlyAdopterFeatureInfo, error) { + feature, err := s.GetEarlyAdopterFeature(ctx, orgID, featureKey) + if err != nil { + return nil, err + } + + return &fflag.EarlyAdopterFeatureInfo{ + Enabled: feature.Enabled, + }, nil +} diff --git a/internal/feature_flags/impl_test.go b/internal/feature_flags/impl_test.go new file mode 100644 index 0000000000..5c6fafb871 --- /dev/null +++ b/internal/feature_flags/impl_test.go @@ -0,0 +1,355 @@ +package feature_flags + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/frain-dev/convoy/config" + "github.com/frain-dev/convoy/database" + "github.com/frain-dev/convoy/database/hooks" + "github.com/frain-dev/convoy/database/postgres" + "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/organisations" + "github.com/frain-dev/convoy/internal/pkg/keys" + "github.com/frain-dev/convoy/internal/users" + log "github.com/frain-dev/convoy/pkg/logger" + "github.com/frain-dev/convoy/testenv" +) + +var testEnv *testenv.Environment + +func TestMain(m *testing.M) { + res, cleanup, err := testenv.Launch(context.Background()) + if err != nil { + panic(err) + } + testEnv = res + + code := m.Run() + + if err := cleanup(); err != nil { + fmt.Printf("failed to cleanup: %v\n", err) + } + + os.Exit(code) +} + +func setupTestDB(t *testing.T) (database.Database, *Service) { + t.Helper() + + err := config.LoadConfig("") + require.NoError(t, err) + + conn, err := testEnv.CloneTestDatabase(t, "convoy") + require.NoError(t, err) + + dbHooks := hooks.Init() + dbHooks.RegisterHook(datastore.EndpointCreated, func(ctx context.Context, data interface{}, changelog interface{}) {}) + + db := postgres.NewFromConnection(conn) + + km, err := keys.NewLocalKeyManager("test") + require.NoError(t, err) + + if km.IsSet() { + _, err = 
km.GetCurrentKeyFromCache() + require.NoError(t, err) + } + + err = keys.Set(km) + require.NoError(t, err) + + logger := log.New("convoy", log.LevelInfo) + return db, New(logger, db) +} + +func seedOrg(t *testing.T, db database.Database) *datastore.Organisation { + t.Helper() + ctx := context.Background() + logger := log.New("convoy", log.LevelInfo) + + userRepo := users.New(logger, db) + user := &datastore.User{ + UID: ulid.Make().String(), + FirstName: "Test", + LastName: "User", + Email: fmt.Sprintf("test-%s@example.com", ulid.Make().String()), + } + require.NoError(t, userRepo.CreateUser(ctx, user)) + + orgRepo := organisations.New(logger, db) + org := &datastore.Organisation{ + UID: ulid.Make().String(), + Name: "Test Org", + OwnerID: user.UID, + } + require.NoError(t, orgRepo.CreateOrganisation(ctx, org)) + + return org +} + +// ============================================================================ +// Feature Flag Tests +// ============================================================================ + +func TestLoadFeatureFlags(t *testing.T) { + _, svc := setupTestDB(t) + ctx := context.Background() + + // The feature_flags table is seeded by migrations, so it should have entries + flags, err := svc.LoadFeatureFlags(ctx) + require.NoError(t, err) + require.NotNil(t, flags) +} + +func TestFetchFeatureFlagByKey(t *testing.T) { + _, svc := setupTestDB(t) + ctx := context.Background() + + t.Run("existing flag", func(t *testing.T) { + // circuit-breaker is a known seeded feature flag + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + require.Equal(t, "circuit-breaker", flag.FeatureKey) + }) + + t.Run("not found", func(t *testing.T) { + _, err := svc.FetchFeatureFlagByKey(ctx, "nonexistent-flag") + require.Equal(t, datastore.ErrFeatureFlagNotFound, err) + }) +} + +func TestFetchFeatureFlagByID(t *testing.T) { + _, svc := setupTestDB(t) + ctx := context.Background() + + // Fetch a known flag first to get its ID + 
flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + t.Run("existing flag", func(t *testing.T) { + fetched, err := svc.FetchFeatureFlagByID(ctx, flag.UID) + require.NoError(t, err) + require.Equal(t, flag.UID, fetched.UID) + require.Equal(t, "circuit-breaker", fetched.FeatureKey) + }) + + t.Run("not found", func(t *testing.T) { + _, err := svc.FetchFeatureFlagByID(ctx, "nonexistent-id") + require.Equal(t, datastore.ErrFeatureFlagNotFound, err) + }) +} + +func TestUpdateFeatureFlag(t *testing.T) { + _, svc := setupTestDB(t) + ctx := context.Background() + + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + // Toggle enabled + newEnabled := !flag.Enabled + require.NoError(t, svc.UpdateFeatureFlag(ctx, flag.UID, newEnabled)) + + updated, err := svc.FetchFeatureFlagByID(ctx, flag.UID) + require.NoError(t, err) + require.Equal(t, newEnabled, updated.Enabled) +} + +// ============================================================================ +// Feature Flag Override Tests +// ============================================================================ + +func TestUpsertAndFetchFeatureFlagOverride(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + override := &datastore.FeatureFlagOverride{ + FeatureFlagID: flag.UID, + OwnerType: "organisation", + OwnerID: org.UID, + Enabled: true, + EnabledBy: null.StringFrom("test-user"), + } + + require.NoError(t, svc.UpsertFeatureFlagOverride(ctx, override)) + + fetched, err := svc.FetchFeatureFlagOverrideByOwner(ctx, "organisation", org.UID, flag.UID) + require.NoError(t, err) + require.Equal(t, true, fetched.Enabled) + require.Equal(t, "organisation", fetched.OwnerType) + require.Equal(t, org.UID, fetched.OwnerID) +} + +func TestLoadFeatureFlagOverridesByOwner(t *testing.T) { + db, svc := setupTestDB(t) + 
org := seedOrg(t, db) + ctx := context.Background() + + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + override := &datastore.FeatureFlagOverride{ + FeatureFlagID: flag.UID, + OwnerType: "organisation", + OwnerID: org.UID, + Enabled: true, + } + require.NoError(t, svc.UpsertFeatureFlagOverride(ctx, override)) + + overrides, err := svc.LoadFeatureFlagOverridesByOwner(ctx, "organisation", org.UID) + require.NoError(t, err) + require.GreaterOrEqual(t, len(overrides), 1) +} + +func TestDeleteFeatureFlagOverride(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + override := &datastore.FeatureFlagOverride{ + FeatureFlagID: flag.UID, + OwnerType: "organisation", + OwnerID: org.UID, + Enabled: true, + } + require.NoError(t, svc.UpsertFeatureFlagOverride(ctx, override)) + + require.NoError(t, svc.DeleteFeatureFlagOverride(ctx, "organisation", org.UID, flag.UID)) + + _, err = svc.FetchFeatureFlagOverrideByOwner(ctx, "organisation", org.UID, flag.UID) + require.Equal(t, datastore.ErrFeatureFlagOverrideNotFound, err) +} + +// ============================================================================ +// Early Adopter Feature Tests +// ============================================================================ + +func TestUpsertAndGetEarlyAdopterFeature(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + feature := &datastore.EarlyAdopterFeature{ + OrganisationID: org.UID, + FeatureKey: "mtls", + Enabled: true, + EnabledBy: null.StringFrom("test-user"), + } + + require.NoError(t, svc.UpsertEarlyAdopterFeature(ctx, feature)) + + fetched, err := svc.GetEarlyAdopterFeature(ctx, org.UID, "mtls") + require.NoError(t, err) + require.Equal(t, true, fetched.Enabled) + require.Equal(t, org.UID, fetched.OrganisationID) + require.Equal(t, "mtls", 
fetched.FeatureKey) +} + +func TestLoadEarlyAdopterFeaturesByOrg(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + require.NoError(t, svc.UpsertEarlyAdopterFeature(ctx, &datastore.EarlyAdopterFeature{ + OrganisationID: org.UID, + FeatureKey: "mtls", + Enabled: true, + })) + + require.NoError(t, svc.UpsertEarlyAdopterFeature(ctx, &datastore.EarlyAdopterFeature{ + OrganisationID: org.UID, + FeatureKey: "oauth-token-exchange", + Enabled: false, + })) + + features, err := svc.LoadEarlyAdopterFeaturesByOrg(ctx, org.UID) + require.NoError(t, err) + require.Equal(t, 2, len(features)) +} + +func TestDeleteEarlyAdopterFeature(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + require.NoError(t, svc.UpsertEarlyAdopterFeature(ctx, &datastore.EarlyAdopterFeature{ + OrganisationID: org.UID, + FeatureKey: "mtls", + Enabled: true, + })) + + require.NoError(t, svc.DeleteEarlyAdopterFeature(ctx, org.UID, "mtls")) + + _, err := svc.GetEarlyAdopterFeature(ctx, org.UID, "mtls") + require.Equal(t, datastore.ErrEarlyAdopterFeatureNotFound, err) +} + +// ============================================================================ +// Interface Method Tests (fflag.FeatureFlagFetcher, fflag.EarlyAdopterFeatureFetcher) +// ============================================================================ + +func TestFetchFeatureFlag_Interface(t *testing.T) { + _, svc := setupTestDB(t) + ctx := context.Background() + + info, err := svc.FetchFeatureFlag(ctx, "circuit-breaker") + require.NoError(t, err) + require.NotEmpty(t, info.UID) + + _, err = svc.FetchFeatureFlag(ctx, "nonexistent") + require.Equal(t, datastore.ErrFeatureFlagNotFound, err) +} + +func TestFetchFeatureFlagOverride_Interface(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + flag, err := svc.FetchFeatureFlagByKey(ctx, "circuit-breaker") + require.NoError(t, err) + + override := 
&datastore.FeatureFlagOverride{ + FeatureFlagID: flag.UID, + OwnerType: "organisation", + OwnerID: org.UID, + Enabled: true, + } + require.NoError(t, svc.UpsertFeatureFlagOverride(ctx, override)) + + info, err := svc.FetchFeatureFlagOverride(ctx, "organisation", org.UID, flag.UID) + require.NoError(t, err) + require.True(t, info.Enabled) +} + +func TestFetchEarlyAdopterFeature_Interface(t *testing.T) { + db, svc := setupTestDB(t) + org := seedOrg(t, db) + ctx := context.Background() + + require.NoError(t, svc.UpsertEarlyAdopterFeature(ctx, &datastore.EarlyAdopterFeature{ + OrganisationID: org.UID, + FeatureKey: "mtls", + Enabled: true, + })) + + info, err := svc.FetchEarlyAdopterFeature(ctx, org.UID, "mtls") + require.NoError(t, err) + require.True(t, info.Enabled) + + _, err = svc.FetchEarlyAdopterFeature(ctx, org.UID, "nonexistent") + require.Equal(t, datastore.ErrEarlyAdopterFeatureNotFound, err) +} diff --git a/internal/feature_flags/queries.sql b/internal/feature_flags/queries.sql new file mode 100644 index 0000000000..47420409c1 --- /dev/null +++ b/internal/feature_flags/queries.sql @@ -0,0 +1,80 @@ +-- Feature Flags Repository SQL Queries +-- Tables: convoy.feature_flags, convoy.feature_flag_overrides, convoy.early_adopter_features + +-- ============================================================================ +-- convoy.feature_flags +-- ============================================================================ + +-- name: FetchFeatureFlagByKey :one +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +WHERE feature_key = @feature_key; + +-- name: FetchFeatureFlagByID :one +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +WHERE id = @id; + +-- name: LoadFeatureFlags :many +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +ORDER BY feature_key; + +-- name: UpdateFeatureFlag :exec +UPDATE convoy.feature_flags +SET enabled = @enabled, updated_at = NOW() 
+WHERE id = @id; + +-- ============================================================================ +-- convoy.feature_flag_overrides +-- ============================================================================ + +-- name: FetchFeatureFlagOverride :one +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE owner_type = @owner_type AND owner_id = @owner_id AND feature_flag_id = @feature_flag_id; + +-- name: LoadFeatureFlagOverridesByOwner :many +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE owner_type = @owner_type AND owner_id = @owner_id; + +-- name: LoadFeatureFlagOverridesByFeatureFlag :many +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE feature_flag_id = @feature_flag_id; + +-- name: UpsertFeatureFlagOverride :exec +INSERT INTO convoy.feature_flag_overrides (id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by) +VALUES (@id, @feature_flag_id, @owner_type, @owner_id, @enabled, @enabled_at, @enabled_by) +ON CONFLICT (owner_type, owner_id, feature_flag_id) +DO UPDATE SET enabled = @enabled, enabled_at = @enabled_at, enabled_by = @enabled_by, updated_at = NOW(); + +-- name: DeleteFeatureFlagOverride :exec +DELETE FROM convoy.feature_flag_overrides +WHERE owner_type = @owner_type AND owner_id = @owner_id AND feature_flag_id = @feature_flag_id; + +-- ============================================================================ +-- convoy.early_adopter_features +-- ============================================================================ + +-- name: FetchEarlyAdopterFeature :one +SELECT id, organisation_id, feature_key, enabled, enabled_by, enabled_at, created_at, updated_at +FROM convoy.early_adopter_features +WHERE organisation_id = 
@organisation_id AND feature_key = @feature_key; + +-- name: LoadEarlyAdopterFeaturesByOrg :many +SELECT id, organisation_id, feature_key, enabled, enabled_by, enabled_at, created_at, updated_at +FROM convoy.early_adopter_features +WHERE organisation_id = @organisation_id +ORDER BY feature_key; + +-- name: UpsertEarlyAdopterFeature :exec +INSERT INTO convoy.early_adopter_features (id, organisation_id, feature_key, enabled, enabled_by, enabled_at) +VALUES (@id, @organisation_id, @feature_key, @enabled, @enabled_by, @enabled_at) +ON CONFLICT (organisation_id, feature_key) +DO UPDATE SET enabled = @enabled, enabled_by = @enabled_by, enabled_at = @enabled_at, updated_at = NOW(); + +-- name: DeleteEarlyAdopterFeature :exec +DELETE FROM convoy.early_adopter_features +WHERE organisation_id = @organisation_id AND feature_key = @feature_key; diff --git a/internal/feature_flags/repo/db.go b/internal/feature_flags/repo/db.go new file mode 100644 index 0000000000..71f6cab52b --- /dev/null +++ b/internal/feature_flags/repo/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/feature_flags/repo/models.go b/internal/feature_flags/repo/models.go new file mode 100644 index 0000000000..434464690a --- /dev/null +++ b/internal/feature_flags/repo/models.go @@ -0,0 +1,5 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 + +package repo diff --git a/internal/feature_flags/repo/querier.go b/internal/feature_flags/repo/querier.go new file mode 100644 index 0000000000..53c46a69d3 --- /dev/null +++ b/internal/feature_flags/repo/querier.go @@ -0,0 +1,29 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +type Querier interface { + DeleteEarlyAdopterFeature(ctx context.Context, arg DeleteEarlyAdopterFeatureParams) error + DeleteFeatureFlagOverride(ctx context.Context, arg DeleteFeatureFlagOverrideParams) error + FetchEarlyAdopterFeature(ctx context.Context, arg FetchEarlyAdopterFeatureParams) (FetchEarlyAdopterFeatureRow, error) + FetchFeatureFlagByID(ctx context.Context, id pgtype.Text) (FetchFeatureFlagByIDRow, error) + FetchFeatureFlagByKey(ctx context.Context, featureKey pgtype.Text) (FetchFeatureFlagByKeyRow, error) + FetchFeatureFlagOverride(ctx context.Context, arg FetchFeatureFlagOverrideParams) (FetchFeatureFlagOverrideRow, error) + LoadEarlyAdopterFeaturesByOrg(ctx context.Context, organisationID pgtype.Text) ([]LoadEarlyAdopterFeaturesByOrgRow, error) + LoadFeatureFlagOverridesByFeatureFlag(ctx context.Context, featureFlagID pgtype.Text) ([]LoadFeatureFlagOverridesByFeatureFlagRow, error) + LoadFeatureFlagOverridesByOwner(ctx context.Context, arg LoadFeatureFlagOverridesByOwnerParams) ([]LoadFeatureFlagOverridesByOwnerRow, error) + LoadFeatureFlags(ctx context.Context) ([]LoadFeatureFlagsRow, error) + UpdateFeatureFlag(ctx context.Context, arg UpdateFeatureFlagParams) error + UpsertEarlyAdopterFeature(ctx context.Context, arg UpsertEarlyAdopterFeatureParams) error + UpsertFeatureFlagOverride(ctx context.Context, arg UpsertFeatureFlagOverrideParams) error +} + +var _ Querier = (*Queries)(nil) diff --git a/internal/feature_flags/repo/queries.sql.go b/internal/feature_flags/repo/queries.sql.go new file mode 100644 index 0000000000..2e3c35a34f 
--- /dev/null +++ b/internal/feature_flags/repo/queries.sql.go @@ -0,0 +1,450 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: queries.sql + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +// ============================================================================ +// convoy.feature_flags +// ============================================================================ + +const fetchFeatureFlagByKey = `-- name: FetchFeatureFlagByKey :one +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +WHERE feature_key = $1 +` + +type FetchFeatureFlagByKeyRow struct { + ID string + FeatureKey string + Enabled bool + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) FetchFeatureFlagByKey(ctx context.Context, featureKey pgtype.Text) (FetchFeatureFlagByKeyRow, error) { + row := q.db.QueryRow(ctx, fetchFeatureFlagByKey, featureKey) + var i FetchFeatureFlagByKeyRow + err := row.Scan( + &i.ID, + &i.FeatureKey, + &i.Enabled, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} + +const fetchFeatureFlagByID = `-- name: FetchFeatureFlagByID :one +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +WHERE id = $1 +` + +type FetchFeatureFlagByIDRow struct { + ID string + FeatureKey string + Enabled bool + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) FetchFeatureFlagByID(ctx context.Context, id pgtype.Text) (FetchFeatureFlagByIDRow, error) { + row := q.db.QueryRow(ctx, fetchFeatureFlagByID, id) + var i FetchFeatureFlagByIDRow + err := row.Scan( + &i.ID, + &i.FeatureKey, + &i.Enabled, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} + +const loadFeatureFlags = `-- name: LoadFeatureFlags :many +SELECT id, feature_key, enabled, created_at, updated_at +FROM convoy.feature_flags +ORDER BY feature_key +` + +type LoadFeatureFlagsRow struct { + ID string + FeatureKey string + 
Enabled bool + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) LoadFeatureFlags(ctx context.Context) ([]LoadFeatureFlagsRow, error) { + rows, err := q.db.Query(ctx, loadFeatureFlags) + if err != nil { + return nil, err + } + defer rows.Close() + var items []LoadFeatureFlagsRow + for rows.Next() { + var i LoadFeatureFlagsRow + if err := rows.Scan( + &i.ID, + &i.FeatureKey, + &i.Enabled, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const updateFeatureFlag = `-- name: UpdateFeatureFlag :exec +UPDATE convoy.feature_flags +SET enabled = $1, updated_at = NOW() +WHERE id = $2 +` + +type UpdateFeatureFlagParams struct { + Enabled bool + ID pgtype.Text +} + +func (q *Queries) UpdateFeatureFlag(ctx context.Context, arg UpdateFeatureFlagParams) error { + _, err := q.db.Exec(ctx, updateFeatureFlag, arg.Enabled, arg.ID) + return err +} + +// ============================================================================ +// convoy.feature_flag_overrides +// ============================================================================ + +const fetchFeatureFlagOverride = `-- name: FetchFeatureFlagOverride :one +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE owner_type = $1 AND owner_id = $2 AND feature_flag_id = $3 +` + +type FetchFeatureFlagOverrideParams struct { + OwnerType pgtype.Text + OwnerID pgtype.Text + FeatureFlagID pgtype.Text +} + +type FetchFeatureFlagOverrideRow struct { + ID string + FeatureFlagID string + OwnerType string + OwnerID string + Enabled bool + EnabledAt pgtype.Timestamptz + EnabledBy pgtype.Text + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) FetchFeatureFlagOverride(ctx context.Context, arg FetchFeatureFlagOverrideParams) 
(FetchFeatureFlagOverrideRow, error) { + row := q.db.QueryRow(ctx, fetchFeatureFlagOverride, arg.OwnerType, arg.OwnerID, arg.FeatureFlagID) + var i FetchFeatureFlagOverrideRow + err := row.Scan( + &i.ID, + &i.FeatureFlagID, + &i.OwnerType, + &i.OwnerID, + &i.Enabled, + &i.EnabledAt, + &i.EnabledBy, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} + +const loadFeatureFlagOverridesByOwner = `-- name: LoadFeatureFlagOverridesByOwner :many +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE owner_type = $1 AND owner_id = $2 +` + +type LoadFeatureFlagOverridesByOwnerParams struct { + OwnerType pgtype.Text + OwnerID pgtype.Text +} + +type LoadFeatureFlagOverridesByOwnerRow struct { + ID string + FeatureFlagID string + OwnerType string + OwnerID string + Enabled bool + EnabledAt pgtype.Timestamptz + EnabledBy pgtype.Text + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) LoadFeatureFlagOverridesByOwner(ctx context.Context, arg LoadFeatureFlagOverridesByOwnerParams) ([]LoadFeatureFlagOverridesByOwnerRow, error) { + rows, err := q.db.Query(ctx, loadFeatureFlagOverridesByOwner, arg.OwnerType, arg.OwnerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []LoadFeatureFlagOverridesByOwnerRow + for rows.Next() { + var i LoadFeatureFlagOverridesByOwnerRow + if err := rows.Scan( + &i.ID, + &i.FeatureFlagID, + &i.OwnerType, + &i.OwnerID, + &i.Enabled, + &i.EnabledAt, + &i.EnabledBy, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const loadFeatureFlagOverridesByFeatureFlag = `-- name: LoadFeatureFlagOverridesByFeatureFlag :many +SELECT id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by, created_at, updated_at +FROM convoy.feature_flag_overrides +WHERE 
feature_flag_id = $1 +` + +type LoadFeatureFlagOverridesByFeatureFlagRow struct { + ID string + FeatureFlagID string + OwnerType string + OwnerID string + Enabled bool + EnabledAt pgtype.Timestamptz + EnabledBy pgtype.Text + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) LoadFeatureFlagOverridesByFeatureFlag(ctx context.Context, featureFlagID pgtype.Text) ([]LoadFeatureFlagOverridesByFeatureFlagRow, error) { + rows, err := q.db.Query(ctx, loadFeatureFlagOverridesByFeatureFlag, featureFlagID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []LoadFeatureFlagOverridesByFeatureFlagRow + for rows.Next() { + var i LoadFeatureFlagOverridesByFeatureFlagRow + if err := rows.Scan( + &i.ID, + &i.FeatureFlagID, + &i.OwnerType, + &i.OwnerID, + &i.Enabled, + &i.EnabledAt, + &i.EnabledBy, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const upsertFeatureFlagOverride = `-- name: UpsertFeatureFlagOverride :exec +INSERT INTO convoy.feature_flag_overrides (id, feature_flag_id, owner_type, owner_id, enabled, enabled_at, enabled_by) +VALUES ($1, $2, $3, $4, $5, $6, $7) +ON CONFLICT (owner_type, owner_id, feature_flag_id) +DO UPDATE SET enabled = $5, enabled_at = $6, enabled_by = $7, updated_at = NOW() +` + +type UpsertFeatureFlagOverrideParams struct { + ID pgtype.Text + FeatureFlagID pgtype.Text + OwnerType pgtype.Text + OwnerID pgtype.Text + Enabled bool + EnabledAt pgtype.Timestamptz + EnabledBy pgtype.Text +} + +func (q *Queries) UpsertFeatureFlagOverride(ctx context.Context, arg UpsertFeatureFlagOverrideParams) error { + _, err := q.db.Exec(ctx, upsertFeatureFlagOverride, + arg.ID, + arg.FeatureFlagID, + arg.OwnerType, + arg.OwnerID, + arg.Enabled, + arg.EnabledAt, + arg.EnabledBy, + ) + return err +} + +const deleteFeatureFlagOverride = `-- name: DeleteFeatureFlagOverride :exec 
+DELETE FROM convoy.feature_flag_overrides +WHERE owner_type = $1 AND owner_id = $2 AND feature_flag_id = $3 +` + +type DeleteFeatureFlagOverrideParams struct { + OwnerType pgtype.Text + OwnerID pgtype.Text + FeatureFlagID pgtype.Text +} + +func (q *Queries) DeleteFeatureFlagOverride(ctx context.Context, arg DeleteFeatureFlagOverrideParams) error { + _, err := q.db.Exec(ctx, deleteFeatureFlagOverride, arg.OwnerType, arg.OwnerID, arg.FeatureFlagID) + return err +} + +// ============================================================================ +// convoy.early_adopter_features +// ============================================================================ + +const fetchEarlyAdopterFeature = `-- name: FetchEarlyAdopterFeature :one +SELECT id, organisation_id, feature_key, enabled, enabled_by, enabled_at, created_at, updated_at +FROM convoy.early_adopter_features +WHERE organisation_id = $1 AND feature_key = $2 +` + +type FetchEarlyAdopterFeatureParams struct { + OrganisationID pgtype.Text + FeatureKey pgtype.Text +} + +type FetchEarlyAdopterFeatureRow struct { + ID string + OrganisationID string + FeatureKey string + Enabled bool + EnabledBy pgtype.Text + EnabledAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) FetchEarlyAdopterFeature(ctx context.Context, arg FetchEarlyAdopterFeatureParams) (FetchEarlyAdopterFeatureRow, error) { + row := q.db.QueryRow(ctx, fetchEarlyAdopterFeature, arg.OrganisationID, arg.FeatureKey) + var i FetchEarlyAdopterFeatureRow + err := row.Scan( + &i.ID, + &i.OrganisationID, + &i.FeatureKey, + &i.Enabled, + &i.EnabledBy, + &i.EnabledAt, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} + +const loadEarlyAdopterFeaturesByOrg = `-- name: LoadEarlyAdopterFeaturesByOrg :many +SELECT id, organisation_id, feature_key, enabled, enabled_by, enabled_at, created_at, updated_at +FROM convoy.early_adopter_features +WHERE organisation_id = $1 +ORDER BY feature_key +` + +type 
LoadEarlyAdopterFeaturesByOrgRow struct { + ID string + OrganisationID string + FeatureKey string + Enabled bool + EnabledBy pgtype.Text + EnabledAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) LoadEarlyAdopterFeaturesByOrg(ctx context.Context, organisationID pgtype.Text) ([]LoadEarlyAdopterFeaturesByOrgRow, error) { + rows, err := q.db.Query(ctx, loadEarlyAdopterFeaturesByOrg, organisationID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []LoadEarlyAdopterFeaturesByOrgRow + for rows.Next() { + var i LoadEarlyAdopterFeaturesByOrgRow + if err := rows.Scan( + &i.ID, + &i.OrganisationID, + &i.FeatureKey, + &i.Enabled, + &i.EnabledBy, + &i.EnabledAt, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const upsertEarlyAdopterFeature = `-- name: UpsertEarlyAdopterFeature :exec +INSERT INTO convoy.early_adopter_features (id, organisation_id, feature_key, enabled, enabled_by, enabled_at) +VALUES ($1, $2, $3, $4, $5, $6) +ON CONFLICT (organisation_id, feature_key) +DO UPDATE SET enabled = $4, enabled_by = $5, enabled_at = $6, updated_at = NOW() +` + +type UpsertEarlyAdopterFeatureParams struct { + ID pgtype.Text + OrganisationID pgtype.Text + FeatureKey pgtype.Text + Enabled bool + EnabledBy pgtype.Text + EnabledAt pgtype.Timestamptz +} + +func (q *Queries) UpsertEarlyAdopterFeature(ctx context.Context, arg UpsertEarlyAdopterFeatureParams) error { + _, err := q.db.Exec(ctx, upsertEarlyAdopterFeature, + arg.ID, + arg.OrganisationID, + arg.FeatureKey, + arg.Enabled, + arg.EnabledBy, + arg.EnabledAt, + ) + return err +} + +const deleteEarlyAdopterFeature = `-- name: DeleteEarlyAdopterFeature :exec +DELETE FROM convoy.early_adopter_features +WHERE organisation_id = $1 AND feature_key = $2 +` + +type DeleteEarlyAdopterFeatureParams struct { + OrganisationID 
pgtype.Text + FeatureKey pgtype.Text +} + +func (q *Queries) DeleteEarlyAdopterFeature(ctx context.Context, arg DeleteEarlyAdopterFeatureParams) error { + _, err := q.db.Exec(ctx, deleteEarlyAdopterFeature, arg.OrganisationID, arg.FeatureKey) + return err +} diff --git a/internal/jobs/impl.go b/internal/jobs/impl.go new file mode 100644 index 0000000000..67b66777c3 --- /dev/null +++ b/internal/jobs/impl.go @@ -0,0 +1,292 @@ +package jobs + +import ( + "context" + "errors" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/frain-dev/convoy/database" + "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/common" + "github.com/frain-dev/convoy/internal/jobs/repo" + log "github.com/frain-dev/convoy/pkg/logger" +) + +var ( + ErrJobNotCreated = errors.New("job could not be created") + ErrJobNotUpdated = errors.New("job could not be updated") + ErrJobNotDeleted = errors.New("job could not be deleted") +) + +// Service implements datastore.JobRepository using sqlc-generated queries +type Service struct { + logger log.Logger + repo repo.Querier + db *pgxpool.Pool +} + +// Ensure Service implements datastore.JobRepository at compile time +var _ datastore.JobRepository = (*Service)(nil) + +// New creates a new jobs service +func New(logger log.Logger, db database.Database) *Service { + return &Service{ + logger: logger, + repo: repo.New(db.GetConn()), + db: db.GetConn(), + } +} + +// rowToJob converts any sqlc-generated row struct to datastore.Job +func rowToJob(row interface{}) datastore.Job { + switch r := row.(type) { + case repo.FetchJobByIdRow: + return datastore.Job{ + UID: r.ID, + Type: r.Type, + Status: datastore.JobStatus(r.Status), + ProjectID: r.ProjectID, + StartedAt: common.PgTimestamptzToNullTime(r.StartedAt), + CompletedAt: common.PgTimestamptzToNullTime(r.CompletedAt), + FailedAt: common.PgTimestamptzToNullTime(r.FailedAt), + CreatedAt: common.PgTimestamptzToTime(r.CreatedAt), + UpdatedAt: 
common.PgTimestamptzToTime(r.UpdatedAt), + DeletedAt: common.PgTimestamptzToNullTime(r.DeletedAt), + } + case repo.FetchRunningJobsByProjectIdRow: + return datastore.Job{ + UID: r.ID, + Type: r.Type, + Status: datastore.JobStatus(r.Status), + ProjectID: r.ProjectID, + StartedAt: common.PgTimestamptzToNullTime(r.StartedAt), + CompletedAt: common.PgTimestamptzToNullTime(r.CompletedAt), + FailedAt: common.PgTimestamptzToNullTime(r.FailedAt), + CreatedAt: common.PgTimestamptzToTime(r.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(r.UpdatedAt), + DeletedAt: common.PgTimestamptzToNullTime(r.DeletedAt), + } + case repo.FetchJobsByProjectIdRow: + return datastore.Job{ + UID: r.ID, + Type: r.Type, + Status: datastore.JobStatus(r.Status), + ProjectID: r.ProjectID, + StartedAt: common.PgTimestamptzToNullTime(r.StartedAt), + CompletedAt: common.PgTimestamptzToNullTime(r.CompletedAt), + FailedAt: common.PgTimestamptzToNullTime(r.FailedAt), + CreatedAt: common.PgTimestamptzToTime(r.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(r.UpdatedAt), + DeletedAt: common.PgTimestamptzToNullTime(r.DeletedAt), + } + case repo.FetchJobsPaginatedRow: + return datastore.Job{ + UID: r.ID, + Type: r.Type, + Status: datastore.JobStatus(r.Status), + ProjectID: r.ProjectID, + StartedAt: common.PgTimestamptzToNullTime(r.StartedAt), + CompletedAt: common.PgTimestamptzToNullTime(r.CompletedAt), + FailedAt: common.PgTimestamptzToNullTime(r.FailedAt), + CreatedAt: common.PgTimestamptzToTime(r.CreatedAt), + UpdatedAt: common.PgTimestamptzToTime(r.UpdatedAt), + } + default: + return datastore.Job{} + } +} + +// CreateJob creates a new job +func (s *Service) CreateJob(ctx context.Context, job *datastore.Job) error { + err := s.repo.CreateJob(ctx, repo.CreateJobParams{ + ID: common.StringToPgText(job.UID), + Type: common.StringToPgText(job.Type), + Status: common.StringToPgText(string(job.Status)), + ProjectID: common.StringToPgText(job.ProjectID), + }) + if err != nil { + s.logger.Error("failed 
to create job", "error", err) + return err + } + + return nil +} + +// MarkJobAsStarted marks a job as started +func (s *Service) MarkJobAsStarted(ctx context.Context, uid, projectID string) error { + result, err := s.repo.MarkJobAsStarted(ctx, repo.MarkJobAsStartedParams{ + ID: common.StringToPgText(uid), + ProjectID: common.StringToPgText(projectID), + }) + if err != nil { + s.logger.Error("failed to mark job as started", "error", err) + return err + } + + if result.RowsAffected() < 1 { + return ErrJobNotUpdated + } + + return nil +} + +// MarkJobAsCompleted marks a job as completed +func (s *Service) MarkJobAsCompleted(ctx context.Context, uid, projectID string) error { + result, err := s.repo.MarkJobAsCompleted(ctx, repo.MarkJobAsCompletedParams{ + ID: common.StringToPgText(uid), + ProjectID: common.StringToPgText(projectID), + }) + if err != nil { + s.logger.Error("failed to mark job as completed", "error", err) + return err + } + + if result.RowsAffected() < 1 { + return ErrJobNotUpdated + } + + return nil +} + +// MarkJobAsFailed marks a job as failed +func (s *Service) MarkJobAsFailed(ctx context.Context, uid, projectID string) error { + result, err := s.repo.MarkJobAsFailed(ctx, repo.MarkJobAsFailedParams{ + ID: common.StringToPgText(uid), + ProjectID: common.StringToPgText(projectID), + }) + if err != nil { + s.logger.Error("failed to mark job as failed", "error", err) + return err + } + + if result.RowsAffected() < 1 { + return ErrJobNotUpdated + } + + return nil +} + +// DeleteJob soft-deletes a job +func (s *Service) DeleteJob(ctx context.Context, uid, projectID string) error { + result, err := s.repo.DeleteJob(ctx, repo.DeleteJobParams{ + ID: common.StringToPgText(uid), + ProjectID: common.StringToPgText(projectID), + }) + if err != nil { + s.logger.Error("failed to delete job", "error", err) + return err + } + + if result.RowsAffected() < 1 { + return ErrJobNotDeleted + } + + return nil +} + +// FetchJobById retrieves a job by its ID +func (s 
*Service) FetchJobById(ctx context.Context, uid, projectID string) (*datastore.Job, error) { + row, err := s.repo.FetchJobById(ctx, repo.FetchJobByIdParams{ + ID: common.StringToPgText(uid), + ProjectID: common.StringToPgText(projectID), + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, datastore.ErrJobNotFound + } + s.logger.Error("failed to fetch job by id", "error", err) + return nil, err + } + + job := rowToJob(row) + return &job, nil +} + +// FetchRunningJobsByProjectId retrieves all running jobs for a project +func (s *Service) FetchRunningJobsByProjectId(ctx context.Context, projectID string) ([]datastore.Job, error) { + rows, err := s.repo.FetchRunningJobsByProjectId(ctx, common.StringToPgText(projectID)) + if err != nil { + s.logger.Error("failed to fetch running jobs", "error", err) + return nil, err + } + + jobs := make([]datastore.Job, 0, len(rows)) + for _, row := range rows { + jobs = append(jobs, rowToJob(row)) + } + + return jobs, nil +} + +// FetchJobsByProjectId retrieves all jobs for a project +func (s *Service) FetchJobsByProjectId(ctx context.Context, projectID string) ([]datastore.Job, error) { + rows, err := s.repo.FetchJobsByProjectId(ctx, common.StringToPgText(projectID)) + if err != nil { + s.logger.Error("failed to fetch jobs by project id", "error", err) + return nil, err + } + + jobs := make([]datastore.Job, 0, len(rows)) + for _, row := range rows { + jobs = append(jobs, rowToJob(row)) + } + + return jobs, nil +} + +// LoadJobsPaged retrieves jobs with pagination +func (s *Service) LoadJobsPaged(ctx context.Context, projectID string, pageable datastore.Pageable) ([]datastore.Job, datastore.PaginationData, error) { + direction := "next" + if pageable.Direction == datastore.Prev { + direction = "prev" + } + + rows, err := s.repo.FetchJobsPaginated(ctx, repo.FetchJobsPaginatedParams{ + Direction: direction, + ProjectID: common.StringToPgText(projectID), + Cursor: common.StringToPgText(pageable.Cursor()), + 
LimitVal: int64(pageable.Limit()), + }) + if err != nil { + s.logger.Error("failed to load jobs paged", "error", err) + return nil, datastore.PaginationData{}, err + } + + jobs := make([]datastore.Job, 0, len(rows)) + for _, row := range rows { + jobs = append(jobs, rowToJob(row)) + } + + // Count previous rows for pagination metadata + var prevRowCount datastore.PrevRowCount + if len(jobs) > 0 { + first := jobs[0] + count, err := s.repo.CountPrevJobs(ctx, repo.CountPrevJobsParams{ + ProjectID: common.StringToPgText(projectID), + Cursor: common.StringToPgText(first.UID), + }) + if err != nil { + s.logger.Error("failed to count prev jobs", "error", err) + return nil, datastore.PaginationData{}, err + } + prevRowCount.Count = int(count.Int64) + } + + // Build pagination metadata with untrimmed ids + ids := make([]string, len(jobs)) + for i := range jobs { + ids[i] = jobs[i].UID + } + + pagination := &datastore.PaginationData{PrevRowCount: prevRowCount} + pagination = pagination.Build(pageable, ids) + + // Trim LIMIT+1 after building pagination + if len(jobs) > pageable.PerPage { + jobs = jobs[:len(jobs)-1] + } + + return jobs, *pagination, nil +} diff --git a/internal/jobs/impl_test.go b/internal/jobs/impl_test.go new file mode 100644 index 0000000000..3ecc73594c --- /dev/null +++ b/internal/jobs/impl_test.go @@ -0,0 +1,316 @@ +package jobs + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/require" + "github.com/frain-dev/convoy/config" + "github.com/frain-dev/convoy/database" + "github.com/frain-dev/convoy/database/hooks" + "github.com/frain-dev/convoy/database/postgres" + "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/organisations" + "github.com/frain-dev/convoy/internal/pkg/keys" + "github.com/frain-dev/convoy/internal/projects" + "github.com/frain-dev/convoy/internal/users" + log "github.com/frain-dev/convoy/pkg/logger" + "github.com/frain-dev/convoy/testenv" 
)

// testEnv holds the shared containerized test environment created in
// TestMain; each test clones its own database from it.
var testEnv *testenv.Environment

// TestMain launches the shared test environment once for the package and
// tears it down after all tests have run.
func TestMain(m *testing.M) {
	res, cleanup, err := testenv.Launch(context.Background())
	if err != nil {
		panic(err)
	}
	testEnv = res

	code := m.Run()

	if err := cleanup(); err != nil {
		fmt.Printf("failed to cleanup: %v\n", err)
	}

	os.Exit(code)
}

// setupTestDB clones a fresh database for the test, initializes hooks and the
// local key manager, and returns the database handle plus a jobs Service
// under test.
func setupTestDB(t *testing.T) (database.Database, *Service) {
	t.Helper()

	err := config.LoadConfig("")
	require.NoError(t, err)

	conn, err := testEnv.CloneTestDatabase(t, "convoy")
	require.NoError(t, err)

	dbHooks := hooks.Init()
	// A no-op hook: job tests don't exercise endpoint events, but hooks must
	// be registered before repositories are used.
	dbHooks.RegisterHook(datastore.EndpointCreated, func(ctx context.Context, data interface{}, changelog interface{}) {})

	db := postgres.NewFromConnection(conn)

	km, err := keys.NewLocalKeyManager("test")
	require.NoError(t, err)

	if km.IsSet() {
		_, err = km.GetCurrentKeyFromCache()
		require.NoError(t, err)
	}

	err = keys.Set(km)
	require.NoError(t, err)

	logger := log.New("convoy", log.LevelInfo)
	return db, New(logger, db)
}

// seedProject creates the user -> organisation -> project chain that jobs
// rows reference, and returns the project.
func seedProject(t *testing.T, db database.Database) *datastore.Project {
	t.Helper()
	ctx := context.Background()
	logger := log.New("convoy", log.LevelInfo)

	userRepo := users.New(logger, db)
	user := &datastore.User{
		UID:       ulid.Make().String(),
		FirstName: "Test",
		LastName:  "User",
		// Unique email per call so a test may seed several projects.
		Email: fmt.Sprintf("test-%s@example.com", ulid.Make().String()),
	}
	require.NoError(t, userRepo.CreateUser(ctx, user))

	orgRepo := organisations.New(logger, db)
	org := &datastore.Organisation{
		UID:     ulid.Make().String(),
		Name:    "Test Org",
		OwnerID: user.UID,
	}
	require.NoError(t, orgRepo.CreateOrganisation(ctx, org))

	projectRepo := projects.New(logger, db)
	projectConfig := datastore.DefaultProjectConfig
	project := &datastore.Project{
		UID:            ulid.Make().String(),
		Name:           "Test Project",
		Type:           datastore.OutgoingProject,
		OrganisationID: org.UID,
		Config:         &projectConfig,
	}
	require.NoError(t, projectRepo.CreateProject(ctx, project))

	return project
}

// TestCreateJob verifies a created job can be read back with its initial
// status, type, and id intact.
func TestCreateJob(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID:       ulid.Make().String(),
		Type:      "search_tokenizer",
		Status:    datastore.JobStatusReady,
		ProjectID: project.UID,
	}

	require.NoError(t, svc.CreateJob(ctx, job))

	fetched, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
	require.NoError(t, err)
	require.Equal(t, job.UID, fetched.UID)
	require.Equal(t, datastore.JobStatusReady, fetched.Status)
	require.Equal(t, "search_tokenizer", fetched.Type)
}

// TestFetchJobById covers both the found and not-found paths, the latter
// mapping to datastore.ErrJobNotFound.
func TestFetchJobById(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID:       ulid.Make().String(),
		Type:      "search_tokenizer",
		Status:    datastore.JobStatusReady,
		ProjectID: project.UID,
	}
	require.NoError(t, svc.CreateJob(ctx, job))

	t.Run("found", func(t *testing.T) {
		fetched, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
		require.NoError(t, err)
		require.Equal(t, job.UID, fetched.UID)
	})

	t.Run("not found", func(t *testing.T) {
		_, err := svc.FetchJobById(ctx, "nonexistent", project.UID)
		require.Equal(t, datastore.ErrJobNotFound, err)
	})
}

// TestFetchJobsByProjectId verifies results are scoped to the requested
// project: jobs in a sibling project must not leak into the listing.
func TestFetchJobsByProjectId(t *testing.T) {
	db, svc := setupTestDB(t)
	p1 := seedProject(t, db)
	p2 := seedProject(t, db)
	ctx := context.Background()

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID:       ulid.Make().String(),
		Type:      "create",
		Status:    datastore.JobStatusRunning,
		ProjectID: p1.UID,
	}))

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID:       ulid.Make().String(),
		Type:      "update",
		Status:    datastore.JobStatusCompleted,
		ProjectID: p2.UID,
	}))

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID:       ulid.Make().String(),
		Type:      "update",
		Status:    datastore.JobStatusFailed,
		ProjectID: p2.UID,
	}))

	jobs, err := svc.FetchJobsByProjectId(ctx, p2.UID)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobs))
}

// TestFetchRunningJobsByProjectId verifies filtering on both project and the
// running status.
func TestFetchRunningJobsByProjectId(t *testing.T) {
	db, svc := setupTestDB(t)
	p1 := seedProject(t, db)
	p2 := seedProject(t, db)
	ctx := context.Background()

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID: ulid.Make().String(), Type: "create", Status: datastore.JobStatusRunning, ProjectID: p1.UID,
	}))

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID: ulid.Make().String(), Type: "update", Status: datastore.JobStatusRunning, ProjectID: p2.UID,
	}))

	require.NoError(t, svc.CreateJob(ctx, &datastore.Job{
		UID: ulid.Make().String(), Type: "update", Status: datastore.JobStatusFailed, ProjectID: p2.UID,
	}))

	jobs, err := svc.FetchRunningJobsByProjectId(ctx, p2.UID)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
}

// TestMarkJobAsStarted verifies the ready -> running transition stamps
// started_at and leaves the terminal timestamps null.
func TestMarkJobAsStarted(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID: ulid.Make().String(), Type: "search_tokenizer", Status: datastore.JobStatusReady, ProjectID: project.UID,
	}
	require.NoError(t, svc.CreateJob(ctx, job))
	require.NoError(t, svc.MarkJobAsStarted(ctx, job.UID, job.ProjectID))

	fetched, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
	require.NoError(t, err)
	require.Equal(t, datastore.JobStatusRunning, fetched.Status)
	require.True(t, fetched.StartedAt.Valid)
	require.True(t, time.Now().After(fetched.StartedAt.Time))
	require.False(t, fetched.FailedAt.Valid)
	require.False(t, fetched.CompletedAt.Valid)
}

// TestMarkJobAsCompleted verifies the running -> completed transition stamps
// completed_at while failed_at stays null.
func TestMarkJobAsCompleted(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID: ulid.Make().String(), Type: "search_tokenizer", Status: datastore.JobStatusReady, ProjectID: project.UID,
	}
	require.NoError(t, svc.CreateJob(ctx, job))
	require.NoError(t, svc.MarkJobAsStarted(ctx, job.UID, job.ProjectID))
	require.NoError(t, svc.MarkJobAsCompleted(ctx, job.UID, job.ProjectID))

	fetched, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
	require.NoError(t, err)
	require.Equal(t, datastore.JobStatusCompleted, fetched.Status)
	require.True(t, fetched.StartedAt.Valid)
	require.True(t, fetched.CompletedAt.Valid)
	require.False(t, fetched.FailedAt.Valid)
}

// TestMarkJobAsFailed verifies the running -> failed transition stamps
// failed_at while completed_at stays null.
func TestMarkJobAsFailed(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID: ulid.Make().String(), Type: "search_tokenizer", Status: datastore.JobStatusReady, ProjectID: project.UID,
	}
	require.NoError(t, svc.CreateJob(ctx, job))
	require.NoError(t, svc.MarkJobAsStarted(ctx, job.UID, job.ProjectID))
	require.NoError(t, svc.MarkJobAsFailed(ctx, job.UID, job.ProjectID))

	fetched, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
	require.NoError(t, err)
	require.Equal(t, datastore.JobStatusFailed, fetched.Status)
	require.True(t, fetched.StartedAt.Valid)
	require.True(t, fetched.FailedAt.Valid)
	require.False(t, fetched.CompletedAt.Valid)
}

// TestDeleteJob verifies a soft-deleted job is no longer visible through
// FetchJobById.
func TestDeleteJob(t *testing.T) {
	db, svc := setupTestDB(t)
	project := seedProject(t, db)
	ctx := context.Background()

	job := &datastore.Job{
		UID: ulid.Make().String(), Type: "search_tokenizer", Status: datastore.JobStatusReady, ProjectID: project.UID,
	}
	require.NoError(t, svc.CreateJob(ctx, job))
	require.NoError(t, svc.DeleteJob(ctx, job.UID, job.ProjectID))

	_, err := svc.FetchJobById(ctx, job.UID, job.ProjectID)
	require.Equal(t, datastore.ErrJobNotFound, err)
}

// TestLoadJobsPaged exercises cursor pagination across several record/page
// size combinations, including under-full pages.
func TestLoadJobsPaged(t *testing.T) {
	tests := []struct {
		name     string
		pageData datastore.Pageable
		count    int
		perPage  int
	}{
		{name: "10 records, page size 3", pageData: datastore.Pageable{PerPage: 3}, count: 10, perPage: 3},
		{name: "12 records, page size 4", pageData: datastore.Pageable{PerPage: 4}, count: 12, perPage: 4},
		{name: "5 records, page size 3",
pageData: datastore.Pageable{PerPage: 3}, count: 5, perPage: 3}, + {name: "1 record, page size 3", pageData: datastore.Pageable{PerPage: 3}, count: 1, perPage: 3}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db, svc := setupTestDB(t) + project := seedProject(t, db) + ctx := context.Background() + + for i := 0; i < tc.count; i++ { + require.NoError(t, svc.CreateJob(ctx, &datastore.Job{ + UID: ulid.Make().String(), + ProjectID: project.UID, + Status: datastore.JobStatusReady, + })) + } + + _, pageable, err := svc.LoadJobsPaged(ctx, project.UID, tc.pageData) + require.NoError(t, err) + require.Equal(t, int64(tc.perPage), pageable.PerPage) + }) + } +} diff --git a/internal/jobs/queries.sql b/internal/jobs/queries.sql new file mode 100644 index 0000000000..af6a5450e0 --- /dev/null +++ b/internal/jobs/queries.sql @@ -0,0 +1,80 @@ +-- Jobs Repository SQL Queries +-- Schema: convoy.jobs +-- Columns: id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at + +-- name: CreateJob :exec +INSERT INTO convoy.jobs (id, type, status, project_id) +VALUES (@id, @type, @status, @project_id); + +-- name: MarkJobAsStarted :execresult +UPDATE convoy.jobs SET + status = 'running', + started_at = NOW(), + updated_at = NOW() +WHERE id = @id AND project_id = @project_id AND deleted_at IS NULL; + +-- name: MarkJobAsCompleted :execresult +UPDATE convoy.jobs SET + status = 'completed', + completed_at = NOW(), + updated_at = NOW() +WHERE id = @id AND project_id = @project_id AND deleted_at IS NULL; + +-- name: MarkJobAsFailed :execresult +UPDATE convoy.jobs SET + status = 'failed', + failed_at = NOW(), + updated_at = NOW() +WHERE id = @id AND project_id = @project_id AND deleted_at IS NULL; + +-- name: DeleteJob :execresult +UPDATE convoy.jobs SET + deleted_at = NOW() +WHERE id = @id AND project_id = @project_id AND deleted_at IS NULL; + +-- name: FetchJobById :one +SELECT id, type, status, project_id, started_at, 
completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE id = @id AND project_id = @project_id AND deleted_at IS NULL; + +-- name: FetchRunningJobsByProjectId :many +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE status = 'running' AND project_id = @project_id AND deleted_at IS NULL; + +-- name: FetchJobsByProjectId :many +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE project_id = @project_id AND deleted_at IS NULL; + +-- name: FetchJobsPaginated :many +WITH filtered_jobs AS ( + SELECT id, type, status, project_id, + started_at, completed_at, failed_at, + created_at, updated_at + FROM convoy.jobs + WHERE deleted_at IS NULL + AND project_id = @project_id + AND ( + CASE + WHEN @direction::text = 'next' THEN id <= @cursor + WHEN @direction::text = 'prev' THEN id >= @cursor + ELSE true + END + ) + ORDER BY + CASE WHEN @direction::text = 'next' THEN id END DESC, + CASE WHEN @direction::text = 'prev' THEN id END ASC + LIMIT @limit_val +) +SELECT * FROM filtered_jobs +ORDER BY + CASE WHEN @direction::text = 'prev' THEN id END DESC, + CASE WHEN @direction::text = 'next' THEN id END DESC; + +-- name: CountPrevJobs :one +SELECT COALESCE(COUNT(DISTINCT(id)), 0) AS count +FROM convoy.jobs +WHERE deleted_at IS NULL + AND project_id = @project_id + AND id > @cursor; diff --git a/internal/jobs/repo/db.go b/internal/jobs/repo/db.go new file mode 100644 index 0000000000..71f6cab52b --- /dev/null +++ b/internal/jobs/repo/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/jobs/repo/models.go b/internal/jobs/repo/models.go new file mode 100644 index 0000000000..434464690a --- /dev/null +++ b/internal/jobs/repo/models.go @@ -0,0 +1,5 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 + +package repo diff --git a/internal/jobs/repo/querier.go b/internal/jobs/repo/querier.go new file mode 100644 index 0000000000..5bfd306353 --- /dev/null +++ b/internal/jobs/repo/querier.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" +) + +type Querier interface { + // Count records before the given cursor (for pagination metadata) + CountPrevJobs(ctx context.Context, arg CountPrevJobsParams) (pgtype.Int8, error) + // Jobs Repository SQL Queries + // Schema: convoy.jobs + // Columns: id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at + CreateJob(ctx context.Context, arg CreateJobParams) error + DeleteJob(ctx context.Context, arg DeleteJobParams) (pgconn.CommandTag, error) + FetchJobById(ctx context.Context, arg FetchJobByIdParams) (FetchJobByIdRow, error) + FetchJobsByProjectId(ctx context.Context, projectID pgtype.Text) ([]FetchJobsByProjectIdRow, error) + FetchJobsPaginated(ctx context.Context, arg FetchJobsPaginatedParams) ([]FetchJobsPaginatedRow, error) + FetchRunningJobsByProjectId(ctx context.Context, projectID pgtype.Text) ([]FetchRunningJobsByProjectIdRow, error) + MarkJobAsCompleted(ctx context.Context, arg MarkJobAsCompletedParams) (pgconn.CommandTag, error) + MarkJobAsFailed(ctx context.Context, arg MarkJobAsFailedParams) (pgconn.CommandTag, error) + MarkJobAsStarted(ctx context.Context, arg MarkJobAsStartedParams) (pgconn.CommandTag, error) +} + +var _ Querier = (*Queries)(nil) diff --git a/internal/jobs/repo/queries.sql.go b/internal/jobs/repo/queries.sql.go new file mode 100644 index 0000000000..59d07a427d --- /dev/null +++ b/internal/jobs/repo/queries.sql.go @@ -0,0 +1,350 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.30.0 +// source: queries.sql + +package repo + +import ( + "context" + + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" +) + +const countPrevJobs = `-- name: CountPrevJobs :one +SELECT COALESCE(COUNT(DISTINCT(id)), 0) AS count +FROM convoy.jobs +WHERE deleted_at IS NULL + AND project_id = $1 + AND id > $2 +` + +type CountPrevJobsParams struct { + ProjectID pgtype.Text + Cursor pgtype.Text +} + +// Count records before the given cursor (for pagination metadata) +func (q *Queries) CountPrevJobs(ctx context.Context, arg CountPrevJobsParams) (pgtype.Int8, error) { + row := q.db.QueryRow(ctx, countPrevJobs, + arg.ProjectID, + arg.Cursor, + ) + var count pgtype.Int8 + err := row.Scan(&count) + return count, err +} + +const createJob = `-- name: CreateJob :exec +INSERT INTO convoy.jobs (id, type, status, project_id) +VALUES ($1, $2, $3, $4) +` + +type CreateJobParams struct { + ID pgtype.Text + Type pgtype.Text + Status pgtype.Text + ProjectID pgtype.Text +} + +// Jobs Repository SQL Queries +// Schema: convoy.jobs +// Columns: id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +func (q *Queries) CreateJob(ctx context.Context, arg CreateJobParams) error { + _, err := q.db.Exec(ctx, createJob, + arg.ID, + arg.Type, + arg.Status, + arg.ProjectID, + ) + return err +} + +const deleteJob = `-- name: DeleteJob :execresult +UPDATE convoy.jobs SET + deleted_at = NOW() +WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL +` + +type DeleteJobParams struct { + ID pgtype.Text + ProjectID pgtype.Text +} + +func (q *Queries) DeleteJob(ctx context.Context, arg DeleteJobParams) (pgconn.CommandTag, error) { + return q.db.Exec(ctx, deleteJob, arg.ID, arg.ProjectID) +} + +const fetchJobById = `-- name: FetchJobById :one +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE id = $1 AND project_id = $2 AND 
deleted_at IS NULL +` + +type FetchJobByIdParams struct { + ID pgtype.Text + ProjectID pgtype.Text +} + +type FetchJobByIdRow struct { + ID string + Type string + Status string + ProjectID string + StartedAt pgtype.Timestamptz + CompletedAt pgtype.Timestamptz + FailedAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz + DeletedAt pgtype.Timestamptz +} + +func (q *Queries) FetchJobById(ctx context.Context, arg FetchJobByIdParams) (FetchJobByIdRow, error) { + row := q.db.QueryRow(ctx, fetchJobById, arg.ID, arg.ProjectID) + var i FetchJobByIdRow + err := row.Scan( + &i.ID, + &i.Type, + &i.Status, + &i.ProjectID, + &i.StartedAt, + &i.CompletedAt, + &i.FailedAt, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + ) + return i, err +} + +const fetchRunningJobsByProjectId = `-- name: FetchRunningJobsByProjectId :many +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE status = 'running' AND project_id = $1 AND deleted_at IS NULL +` + +type FetchRunningJobsByProjectIdRow struct { + ID string + Type string + Status string + ProjectID string + StartedAt pgtype.Timestamptz + CompletedAt pgtype.Timestamptz + FailedAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz + DeletedAt pgtype.Timestamptz +} + +func (q *Queries) FetchRunningJobsByProjectId(ctx context.Context, projectID pgtype.Text) ([]FetchRunningJobsByProjectIdRow, error) { + rows, err := q.db.Query(ctx, fetchRunningJobsByProjectId, projectID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchRunningJobsByProjectIdRow + for rows.Next() { + var i FetchRunningJobsByProjectIdRow + if err := rows.Scan( + &i.ID, + &i.Type, + &i.Status, + &i.ProjectID, + &i.StartedAt, + &i.CompletedAt, + &i.FailedAt, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil 
{ + return nil, err + } + return items, nil +} + +const fetchJobsByProjectId = `-- name: FetchJobsByProjectId :many +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at, deleted_at +FROM convoy.jobs +WHERE project_id = $1 AND deleted_at IS NULL +` + +type FetchJobsByProjectIdRow struct { + ID string + Type string + Status string + ProjectID string + StartedAt pgtype.Timestamptz + CompletedAt pgtype.Timestamptz + FailedAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz + DeletedAt pgtype.Timestamptz +} + +func (q *Queries) FetchJobsByProjectId(ctx context.Context, projectID pgtype.Text) ([]FetchJobsByProjectIdRow, error) { + rows, err := q.db.Query(ctx, fetchJobsByProjectId, projectID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchJobsByProjectIdRow + for rows.Next() { + var i FetchJobsByProjectIdRow + if err := rows.Scan( + &i.ID, + &i.Type, + &i.Status, + &i.ProjectID, + &i.StartedAt, + &i.CompletedAt, + &i.FailedAt, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchJobsPaginated = `-- name: FetchJobsPaginated :many +WITH filtered_jobs AS ( + SELECT id, type, status, project_id, + started_at, completed_at, failed_at, + created_at, updated_at + FROM convoy.jobs + WHERE deleted_at IS NULL + AND project_id = $2 + AND ( + CASE + WHEN $1::text = 'next' THEN id <= $3 + WHEN $1::text = 'prev' THEN id >= $3 + ELSE true + END + ) + ORDER BY + CASE WHEN $1::text = 'next' THEN id END DESC, + CASE WHEN $1::text = 'prev' THEN id END ASC + LIMIT $4 +) +SELECT id, type, status, project_id, started_at, completed_at, failed_at, created_at, updated_at FROM filtered_jobs +ORDER BY + CASE WHEN $1::text = 'prev' THEN id END DESC, + CASE WHEN $1::text = 'next' THEN id END DESC +` + +type FetchJobsPaginatedParams 
struct { + Direction string + ProjectID pgtype.Text + Cursor pgtype.Text + LimitVal int64 +} + +type FetchJobsPaginatedRow struct { + ID string + Type string + Status string + ProjectID string + StartedAt pgtype.Timestamptz + CompletedAt pgtype.Timestamptz + FailedAt pgtype.Timestamptz + CreatedAt pgtype.Timestamptz + UpdatedAt pgtype.Timestamptz +} + +func (q *Queries) FetchJobsPaginated(ctx context.Context, arg FetchJobsPaginatedParams) ([]FetchJobsPaginatedRow, error) { + rows, err := q.db.Query(ctx, fetchJobsPaginated, + arg.Direction, + arg.ProjectID, + arg.Cursor, + arg.LimitVal, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchJobsPaginatedRow + for rows.Next() { + var i FetchJobsPaginatedRow + if err := rows.Scan( + &i.ID, + &i.Type, + &i.Status, + &i.ProjectID, + &i.StartedAt, + &i.CompletedAt, + &i.FailedAt, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const markJobAsStarted = `-- name: MarkJobAsStarted :execresult +UPDATE convoy.jobs SET + status = 'running', + started_at = NOW(), + updated_at = NOW() +WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL +` + +type MarkJobAsStartedParams struct { + ID pgtype.Text + ProjectID pgtype.Text +} + +func (q *Queries) MarkJobAsStarted(ctx context.Context, arg MarkJobAsStartedParams) (pgconn.CommandTag, error) { + return q.db.Exec(ctx, markJobAsStarted, arg.ID, arg.ProjectID) +} + +const markJobAsCompleted = `-- name: MarkJobAsCompleted :execresult +UPDATE convoy.jobs SET + status = 'completed', + completed_at = NOW(), + updated_at = NOW() +WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL +` + +type MarkJobAsCompletedParams struct { + ID pgtype.Text + ProjectID pgtype.Text +} + +func (q *Queries) MarkJobAsCompleted(ctx context.Context, arg MarkJobAsCompletedParams) (pgconn.CommandTag, error) { + return q.db.Exec(ctx, 
markJobAsCompleted, arg.ID, arg.ProjectID) +} + +const markJobAsFailed = `-- name: MarkJobAsFailed :execresult +UPDATE convoy.jobs SET + status = 'failed', + failed_at = NOW(), + updated_at = NOW() +WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL +` + +type MarkJobAsFailedParams struct { + ID pgtype.Text + ProjectID pgtype.Text +} + +func (q *Queries) MarkJobAsFailed(ctx context.Context, arg MarkJobAsFailedParams) (pgconn.CommandTag, error) { + return q.db.Exec(ctx, markJobAsFailed, arg.ID, arg.ProjectID) +} diff --git a/sqlc.yaml b/sqlc.yaml index 041f25841f..f9f681e60f 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -202,3 +202,23 @@ sql: overrides: - column: "convoy.event_deliveries.delivery_mode" go_type: "string" + - queries: ./internal/jobs/queries.sql + engine: postgresql + database: *db_config + gen: + go: + package: "repo" + out: "./internal/jobs/repo" + sql_package: "pgx/v5" + omit_unused_structs: true + emit_interface: true + - queries: ./internal/feature_flags/queries.sql + engine: postgresql + database: *db_config + gen: + go: + package: "repo" + out: "./internal/feature_flags/repo" + sql_package: "pgx/v5" + omit_unused_structs: true + emit_interface: true From 02c8aea9f91af904cd4cbc8cb2db29d38ef74caf Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 15:16:08 +0200 Subject: [PATCH 2/7] refactor: centralize feature flag service usage across handlers Replace direct instantiation of feature flag service with centralized `FeatureFlagService` in application context. Update relevant API handlers for consistency. 
--- api/handlers/organisation.go | 15 +++++++-------- api/types/types.go | 2 ++ cmd/server/server.go | 1 + 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/api/handlers/organisation.go b/api/handlers/organisation.go index abbaa2795b..0d19b1331e 100644 --- a/api/handlers/organisation.go +++ b/api/handlers/organisation.go @@ -17,7 +17,6 @@ import ( "github.com/frain-dev/convoy/auth" "github.com/frain-dev/convoy/datastore" "github.com/frain-dev/convoy/internal/event_deliveries" - "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/organisation_members" "github.com/frain-dev/convoy/internal/organisations" "github.com/frain-dev/convoy/internal/pkg/batch_tracker" @@ -289,7 +288,7 @@ func (h *Handler) GetEarlyAdopterFeatures(w http.ResponseWriter, r *http.Request features := fflag.GetEarlyAdopterFeatures() responseFeatures := make([]models.EarlyAdopterFeature, 0, len(features)) - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService earlyAdopterFeatures, err := ffService.LoadEarlyAdopterFeaturesByOrg(r.Context(), org.UID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) @@ -379,7 +378,7 @@ func (h *Handler) updateFeatureFlag(w http.ResponseWriter, r *http.Request, feat feature.EnabledAt = null.TimeFrom(time.Now()) } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService err := ffService.UpsertEarlyAdopterFeature(r.Context(), feature) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) @@ -425,7 +424,7 @@ func (h *Handler) GetAllFeatureFlags(w http.ResponseWriter, r *http.Request) { return } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService flags, err := ffService.LoadFeatureFlags(r.Context()) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) @@ -485,7 +484,7 @@ func (h *Handler) GetOrganisationOverrides(w http.ResponseWriter, r *http.Reques 
return } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService overrides, err := ffService.LoadFeatureFlagOverridesByOwner(r.Context(), "organisation", orgID) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) @@ -541,7 +540,7 @@ func (h *Handler) UpdateOrganisationOverride(w http.ResponseWriter, r *http.Requ return } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService // Fetch the feature flag featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), overrideRequest.FeatureKey) @@ -600,7 +599,7 @@ func (h *Handler) DeleteOrganisationOverride(w http.ResponseWriter, r *http.Requ return } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService // Fetch the feature flag to get its ID featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), featureKey) @@ -912,7 +911,7 @@ func (h *Handler) UpdateFeatureFlag(w http.ResponseWriter, r *http.Request) { return } - ffService := feature_flags.New(h.A.Logger, h.A.DB) + ffService := h.A.FeatureFlagService // Fetch the feature flag featureFlag, err := ffService.FetchFeatureFlagByKey(r.Context(), featureKey) diff --git a/api/types/types.go b/api/types/types.go index 722260d208..eecf2d8179 100644 --- a/api/types/types.go +++ b/api/types/types.go @@ -9,6 +9,7 @@ import ( "github.com/frain-dev/convoy/config" "github.com/frain-dev/convoy/database" "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/feature_flags" "github.com/frain-dev/convoy/internal/pkg/billing" "github.com/frain-dev/convoy/internal/pkg/fflag" "github.com/frain-dev/convoy/internal/pkg/license" @@ -21,6 +22,7 @@ type APIOptions struct { FFlag *fflag.FFlag FeatureFlagFetcher fflag.FeatureFlagFetcher EarlyAdopterFeatureFetcher fflag.EarlyAdopterFeatureFetcher + FeatureFlagService *feature_flags.Service DB database.Database Redis redis.UniversalClient Queue queue.Queuer diff --git 
a/cmd/server/server.go b/cmd/server/server.go index 5478f5f8d8..ce0e5d7591 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -142,6 +142,7 @@ func StartConvoyServer(a *cli.App) error { FFlag: flag, FeatureFlagFetcher: featureFlagSvc, EarlyAdopterFeatureFetcher: featureFlagSvc, + FeatureFlagService: featureFlagSvc, DB: a.DB, Queue: a.Queue, Logger: lo, From 761db874b9fc24a6d34ca09ddd7f045b0c68a1f3 Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 15:16:17 +0200 Subject: [PATCH 3/7] refactor: remove package-level logger from postgres and make logger explicit Replaced the default package-level logger (`pkgLogger`) in the postgres module with explicit logger injection. Updated affected functions to accept and use the logger parameter consistently. --- database/postgres/pkg_logger.go | 8 -------- database/postgres/postgres.go | 14 +++++++------- 2 files changed, 7 insertions(+), 15 deletions(-) delete mode 100644 database/postgres/pkg_logger.go diff --git a/database/postgres/pkg_logger.go b/database/postgres/pkg_logger.go deleted file mode 100644 index 4aa854c360..0000000000 --- a/database/postgres/pkg_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package postgres - -import ( - log "github.com/frain-dev/convoy/pkg/logger" -) - -// pkgLogger is the default package-level logger used when no logger is explicitly provided. 
-var pkgLogger log.Logger = log.New("postgres", log.LevelInfo) diff --git a/database/postgres/postgres.go b/database/postgres/postgres.go index f9594d3673..deb015a03c 100644 --- a/database/postgres/postgres.go +++ b/database/postgres/postgres.go @@ -41,14 +41,14 @@ type Postgres struct { logger log.Logger } -func NewDB(cfg config.Configuration) (*Postgres, error) { - return NewDBWithLogger(cfg, pkgLogger) +func NewDB(cfg config.Configuration, logger log.Logger) (*Postgres, error) { + return NewDBWithLogger(cfg, logger) } func NewDBWithLogger(cfg config.Configuration, logger log.Logger) (*Postgres, error) { dbConfig := cfg.Database - primary, err := parseDBConfig(dbConfig) + primary, err := parseDBConfig(dbConfig, logger, "primary ") primary.id = 0 primary.logger = logger replicas := make([]*Postgres, 0) @@ -59,7 +59,7 @@ func NewDBWithLogger(cfg config.Configuration, logger log.Logger) (*Postgres, er if replica.Scheme == "" { replica.Scheme = dbConfig.Scheme } - r, e := parseDBConfig(replica, "replica ") + r, e := parseDBConfig(replica, logger, "replica ") if e != nil { return nil, e } @@ -86,7 +86,7 @@ func NewFromConnection(pool *pgxpool.Pool) *Postgres { return &Postgres{dbx: db, pool: pool, conn: pool} } -func parseDBConfig(dbConfig config.DatabaseConfiguration, src ...string) (*Postgres, error) { +func parseDBConfig(dbConfig config.DatabaseConfiguration, logger log.Logger, src ...string) (*Postgres, error) { pgxCfg, err := pgxpool.ParseConfig(dbConfig.BuildDsn()) if err != nil { return nil, fmt.Errorf("failed to create %sconnection pool: %w", src, err) @@ -97,7 +97,7 @@ func parseDBConfig(dbConfig config.DatabaseConfiguration, src ...string) (*Postg maxConns := dbConfig.SetMaxOpenConnections if maxConns <= 0 { maxConns = 100 - pkgLogger.Warn(fmt.Sprintf("[%s]: SetMaxOpenConnections not set or 0, using default: %d. 
Set CONVOY_DB_MAX_OPEN_CONN to override.", pkgName, maxConns)) + logger.Warn(fmt.Sprintf("[%s]: SetMaxOpenConnections not set or 0, using default: %d. Set CONVOY_DB_MAX_OPEN_CONN to override.", pkgName, maxConns)) } pgxCfg.MaxConns = int32(maxConns) @@ -111,7 +111,7 @@ func parseDBConfig(dbConfig config.DatabaseConfiguration, src ...string) (*Postg pool, err := pgxpool.NewWithConfig(context.Background(), pgxCfg) if err != nil { defer pool.Close() - return nil, fmt.Errorf("[%s]: failed to open %sdatabase - %v", pkgName, src, err) + return nil, fmt.Errorf("[%s]: failed to open %s database - %v", pkgName, src, err) } sqlDB := stdlib.OpenDBFromPool(pool) From 5ddd526de0b45d7df20b64ee45ba65a11417ef90 Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 15:16:25 +0200 Subject: [PATCH 4/7] refactor: simplify job count query and remove unused error constant - Replace `COALESCE(COUNT(DISTINCT(id)), 0)` with `COUNT(id)` in job count queries for consistency and clarity. - Remove unused `ErrJobNotCreated` constant from `impl.go`. 
--- internal/jobs/impl.go | 1 - internal/jobs/queries.sql | 2 +- internal/jobs/repo/queries.sql.go | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/jobs/impl.go b/internal/jobs/impl.go index 67b66777c3..e8a739aa6c 100644 --- a/internal/jobs/impl.go +++ b/internal/jobs/impl.go @@ -15,7 +15,6 @@ import ( ) var ( - ErrJobNotCreated = errors.New("job could not be created") ErrJobNotUpdated = errors.New("job could not be updated") ErrJobNotDeleted = errors.New("job could not be deleted") ) diff --git a/internal/jobs/queries.sql b/internal/jobs/queries.sql index af6a5450e0..49e36105fc 100644 --- a/internal/jobs/queries.sql +++ b/internal/jobs/queries.sql @@ -73,7 +73,7 @@ ORDER BY CASE WHEN @direction::text = 'next' THEN id END DESC; -- name: CountPrevJobs :one -SELECT COALESCE(COUNT(DISTINCT(id)), 0) AS count +SELECT COUNT(id) AS count FROM convoy.jobs WHERE deleted_at IS NULL AND project_id = @project_id diff --git a/internal/jobs/repo/queries.sql.go b/internal/jobs/repo/queries.sql.go index 59d07a427d..bb4c501aad 100644 --- a/internal/jobs/repo/queries.sql.go +++ b/internal/jobs/repo/queries.sql.go @@ -13,7 +13,7 @@ import ( ) const countPrevJobs = `-- name: CountPrevJobs :one -SELECT COALESCE(COUNT(DISTINCT(id)), 0) AS count +SELECT COUNT(id) AS count FROM convoy.jobs WHERE deleted_at IS NULL AND project_id = $1 From 3f6dc1e1e2fc1d56cd2f7f90cddddf9454651ac1 Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 15:16:30 +0200 Subject: [PATCH 5/7] test: remove outdated postgres integration tests - Deleted `postgres_test.go` as it no longer aligns with the current database structure and testing strategy. 
--- database/postgres/postgres_test.go | 110 ----------------------------- 1 file changed, 110 deletions(-) delete mode 100644 database/postgres/postgres_test.go diff --git a/database/postgres/postgres_test.go b/database/postgres/postgres_test.go deleted file mode 100644 index 68eba36748..0000000000 --- a/database/postgres/postgres_test.go +++ /dev/null @@ -1,110 +0,0 @@ -//go:build integration - -package postgres - -import ( - "context" - "fmt" - "os" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/frain-dev/convoy/config" - "github.com/frain-dev/convoy/database" - "github.com/frain-dev/convoy/database/hooks" - "github.com/frain-dev/convoy/datastore" - "github.com/frain-dev/convoy/internal/pkg/keys" - log "github.com/frain-dev/convoy/pkg/logger" -) - -func getConfig() config.Configuration { - _ = os.Setenv("CONVOY_DB_HOST", os.Getenv("TEST_REDIS_HOST")) - _ = os.Setenv("CONVOY_REDIS_SCHEME", os.Getenv("TEST_REDIS_SCHEME")) - _ = os.Setenv("CONVOY_REDIS_PORT", os.Getenv("TEST_REDIS_PORT")) - - _ = os.Setenv("CONVOY_DB_HOST", os.Getenv("TEST_DB_HOST")) - _ = os.Setenv("CONVOY_DB_SCHEME", os.Getenv("TEST_DB_SCHEME")) - _ = os.Setenv("CONVOY_DB_USERNAME", os.Getenv("TEST_DB_USERNAME")) - _ = os.Setenv("CONVOY_DB_PASSWORD", os.Getenv("TEST_DB_PASSWORD")) - _ = os.Setenv("CONVOY_DB_DATABASE", os.Getenv("TEST_DB_DATABASE")) - _ = os.Setenv("CONVOY_DB_PORT", os.Getenv("TEST_DB_PORT")) - - _ = os.Setenv("CONVOY_LOCAL_ENCRYPTION_KEY", "test-key") - - err := config.LoadConfig("") - if err != nil { - log.Fatal(err) - } - - cfg, err := config.Get() - if err != nil { - log.Fatal(err) - } - - km, err := keys.NewLocalKeyManager("test") - if err != nil { - log.Fatal(err) - } - if km.IsSet() { - if _, err = km.GetCurrentKeyFromCache(); err != nil { - log.Fatal(err) - } - } - if err = keys.Set(km); err != nil { - log.Fatal(err) - } - - return cfg -} - -var ( - once = sync.Once{} - _db *Postgres -) - -func getDB(t *testing.T) (database.Database, func()) 
{ - once.Do(func() { - var err error - - dbHooks := hooks.Init() - dbHooks.RegisterHook(datastore.EndpointCreated, func(ctx context.Context, data interface{}, changelog interface{}) {}) - - _db, err = NewDB(getConfig()) - require.NoError(t, err) - }) - - return _db, func() { - require.NoError(t, _db.truncateTables()) - } -} - -func (p *Postgres) truncateTables() error { - tables := ` - convoy.event_deliveries, - convoy.events, - convoy.api_keys, - convoy.subscriptions, - convoy.source_verifiers, - convoy.sources, - convoy.configurations, - convoy.devices, - convoy.portal_links, - convoy.organisation_invites, - convoy.applications, - convoy.endpoints, - convoy.projects, - convoy.project_configurations, - convoy.organisation_members, - convoy.organisations, - convoy.users - ` - - _, err := p.dbx.ExecContext(context.Background(), fmt.Sprintf("TRUNCATE %s CASCADE;", tables)) - if err != nil { - return err - } - - return nil -} From a3f079bebaa4d9ad6596b30d14c65ba9694c7b9f Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 15:55:46 +0200 Subject: [PATCH 6/7] refactor: inject logger into database initialization and migration commands - Updated `postgres.NewDB` calls to explicitly pass logger instances across CLI utilities and migration commands. - Replaced package-level log usage with `a.Logger` for consistent logging. - Improved logging in migration commands (`up`, `down`, `create`) and other utilities. 
--- cmd/hooks/hooks.go | 2 +- cmd/migrate/migrate.go | 51 ++++++++++++++++------------------ cmd/utils/circuit_breakers.go | 2 +- cmd/utils/init_encryption.go | 2 +- cmd/utils/revert_encryption.go | 2 +- cmd/utils/rotate_key.go | 2 +- internal/jobs/impl_test.go | 1 + 7 files changed, 30 insertions(+), 32 deletions(-) diff --git a/cmd/hooks/hooks.go b/cmd/hooks/hooks.go index 355c5d94fd..1981dfb229 100644 --- a/cmd/hooks/hooks.go +++ b/cmd/hooks/hooks.go @@ -103,7 +103,7 @@ func PreRun(app *cli.App, db *postgres.Postgres) func(cmd *cobra.Command, args [ lo := log.New("convoy", logLevel) - postgresDB, err := postgres.NewDB(cfg) + postgresDB, err := postgres.NewDB(cfg, lo) if err != nil { return errors.New("failed to connect to postgres with err: " + err.Error()) } diff --git a/cmd/migrate/migrate.go b/cmd/migrate/migrate.go index db8fe527b1..a50d0483bc 100644 --- a/cmd/migrate/migrate.go +++ b/cmd/migrate/migrate.go @@ -11,7 +11,6 @@ import ( "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/internal/pkg/cli" "github.com/frain-dev/convoy/internal/pkg/migrator" - log "github.com/frain-dev/convoy/pkg/logger" ) func AddMigrateCommand(a *cli.App) *cobra.Command { @@ -20,14 +19,14 @@ func AddMigrateCommand(a *cli.App) *cobra.Command { Short: "Convoy migrations", } - cmd.AddCommand(addUpCommand()) - cmd.AddCommand(addDownCommand()) - cmd.AddCommand(addCreateCommand()) + cmd.AddCommand(addUpCommand(a)) + cmd.AddCommand(addDownCommand(a)) + cmd.AddCommand(addCreateCommand(a)) return cmd } -func addUpCommand() *cobra.Command { +func addUpCommand(a *cli.App) *cobra.Command { cmd := &cobra.Command{ Use: "up", Aliases: []string{"migrate-up"}, @@ -37,28 +36,26 @@ func addUpCommand() *cobra.Command { "ShouldBootstrap": "false", }, Run: func(cmd *cobra.Command, args []string) { - lo := log.New("convoy", log.LevelInfo) - - lo.Info("Running migrations...") + a.Logger.Info("Running migrations...") cfg, err := config.Get() if err != nil { - lo.Fatal("Error 
fetching the config.", "error", err) + a.Logger.Fatal("Error fetching the config.", "error", err) } - db, err := postgres.NewDB(cfg) + db, err := postgres.NewDB(cfg, a.Logger) if err != nil { - lo.Fatal(err) + a.Logger.Fatal(err) } defer db.Close() - m := migrator.NewWithLogger(db, lo) + m := migrator.NewWithLogger(db, a.Logger) err = m.Up() if err != nil { - lo.Fatalf("migration up failed with error: %+v", err) + a.Logger.Fatalf("migration up failed with error: %+v", err) } - lo.Info("Migration completed successfully.") + a.Logger.Info("Migration completed successfully.") os.Exit(0) }, @@ -67,7 +64,7 @@ func addUpCommand() *cobra.Command { return cmd } -func addDownCommand() *cobra.Command { +func addDownCommand(a *cli.App) *cobra.Command { var maxMigrations int cmd := &cobra.Command{ @@ -79,26 +76,26 @@ func addDownCommand() *cobra.Command { "ShouldBootstrap": "false", }, Run: func(cmd *cobra.Command, args []string) { - lo := log.New("convoy", log.LevelInfo) + a.Logger.Info("Rolling back migrations...") cfg, err := config.Get() if err != nil { - lo.Fatal("Error fetching the config.", "error", err) + a.Logger.Fatal("Error fetching the config.", "error", err) } - db, err := postgres.NewDB(cfg) + db, err := postgres.NewDB(cfg, a.Logger) if err != nil { - lo.Fatal(err) + a.Logger.Fatal(err) } defer db.Close() - m := migrator.NewWithLogger(db, lo) + m := migrator.NewWithLogger(db, a.Logger) err = m.Down(maxMigrations) if err != nil { - lo.Fatalf("migration down failed with error: %+v", err) + a.Logger.Fatalf("migration down failed with error: %+v", err) } - lo.Info("Migration completed successfully.") + a.Logger.Info("Migration completed successfully.") os.Exit(0) }, @@ -109,7 +106,7 @@ func addDownCommand() *cobra.Command { return cmd } -func addCreateCommand() *cobra.Command { +func addCreateCommand(a *cli.App) *cobra.Command { cmd := &cobra.Command{ Use: "create", Short: "creates a new migration file", @@ -118,12 +115,12 @@ func addCreateCommand() *cobra.Command { 
"ShouldBootstrap": "false", }, Run: func(cmd *cobra.Command, args []string) { - lo := log.New("convoy", log.LevelInfo) + a.Logger.Info("Creating new migration file...") fileName := fmt.Sprintf("sql/%v.sql", time.Now().Unix()) f, err := os.Create(fileName) if err != nil { - lo.Fatal(err) + a.Logger.Fatal(err) } defer f.Close() @@ -132,11 +129,11 @@ func addCreateCommand() *cobra.Command { for _, line := range lines { _, err := f.WriteString(line + "\n\n") if err != nil { - lo.Fatal(err) + a.Logger.Fatal(err) } } - lo.Infof("Created migration: %s", fileName) + a.Logger.Infof("Created migration: %s", fileName) }, } diff --git a/cmd/utils/circuit_breakers.go b/cmd/utils/circuit_breakers.go index e4b1036e40..17e40ef36e 100644 --- a/cmd/utils/circuit_breakers.go +++ b/cmd/utils/circuit_breakers.go @@ -45,7 +45,7 @@ func AddCircuitBreakersGetCommand(a *cli.App) *cobra.Command { // Remove the "breaker:" prefix if present breakerID = strings.TrimPrefix(breakerID, "breaker:") - // Create circuit breaker manager with config provider + // Create a circuit breaker manager with the config provider cbManager, err := cb.NewCircuitBreakerManager( cb.ConfigProviderOption(func(projectID string) *cb.CircuitBreakerConfig { // For get command, we don't have projectID yet, so use defaults diff --git a/cmd/utils/init_encryption.go b/cmd/utils/init_encryption.go index 2d7f53429f..97b6ca0edb 100644 --- a/cmd/utils/init_encryption.go +++ b/cmd/utils/init_encryption.go @@ -63,7 +63,7 @@ func AddInitEncryptionCommand(a *cli.App) *cobra.Command { a.Logger.Info("Initializing encryption with the current encryption key...") - db, err := postgres.NewDB(cfg) + db, err := postgres.NewDB(cfg, a.Logger) if err != nil { a.Logger.Error("Error connecting to database.", "error", err) return err diff --git a/cmd/utils/revert_encryption.go b/cmd/utils/revert_encryption.go index ab80ecface..7fda0b3105 100644 --- a/cmd/utils/revert_encryption.go +++ b/cmd/utils/revert_encryption.go @@ -51,7 +51,7 @@ func 
AddRevertEncryptionCommand(a *cli.App) *cobra.Command { slog.Info("Reverting encryption with the current encryption key...") - db, err := postgres.NewDB(cfg) + db, err := postgres.NewDB(cfg, a.Logger) if err != nil { slog.Error("Error connecting to database.", "error", err) return err diff --git a/cmd/utils/rotate_key.go b/cmd/utils/rotate_key.go index 5459b6989b..340c7ee81a 100644 --- a/cmd/utils/rotate_key.go +++ b/cmd/utils/rotate_key.go @@ -64,7 +64,7 @@ func AddRotateKeyCommand(a *cli.App) *cobra.Command { return ErrOldEncryptionKeyMismatch } - db, err := postgres.NewDB(cfg) + db, err := postgres.NewDB(cfg, a.Logger) if err != nil { slog.Error("Error connecting to database.", "error", err) return err diff --git a/internal/jobs/impl_test.go b/internal/jobs/impl_test.go index 3ecc73594c..48aba505f9 100644 --- a/internal/jobs/impl_test.go +++ b/internal/jobs/impl_test.go @@ -9,6 +9,7 @@ import ( "github.com/oklog/ulid/v2" "github.com/stretchr/testify/require" + "github.com/frain-dev/convoy/config" "github.com/frain-dev/convoy/database" "github.com/frain-dev/convoy/database/hooks" From 50bc8c6e6c159b3d07eaf07726d525fb5fda865f Mon Sep 17 00:00:00 2001 From: Raymond Tukpe Date: Wed, 15 Apr 2026 17:11:46 +0200 Subject: [PATCH 7/7] refactor: enhance logging and error handling in migration commands - Inject logger into CLI initialization and migrate commands for consistent logging. - Replace `Run` with `RunE` in Cobra commands to standardize error propagation. - Remove `os.Exit` calls in favor of returning errors for better control flow. 
--- cmd/main.go | 2 ++ cmd/migrate/migrate.go | 28 ++++++++++++++-------------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 58b863dd37..0eec3167e3 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -22,6 +22,7 @@ import ( "github.com/frain-dev/convoy/config" "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/internal/pkg/cli" + log "github.com/frain-dev/convoy/pkg/logger" ) func main() { @@ -34,6 +35,7 @@ func main() { app := &cli.App{} app.Version = convoy.GetVersion() db := &postgres.Postgres{} + app.Logger = log.New("convoy", log.LevelDebug) c := cli.NewCli(app) diff --git a/cmd/migrate/migrate.go b/cmd/migrate/migrate.go index a50d0483bc..16eac53986 100644 --- a/cmd/migrate/migrate.go +++ b/cmd/migrate/migrate.go @@ -35,29 +35,29 @@ func addUpCommand(a *cli.App) *cobra.Command { "CheckMigration": "false", "ShouldBootstrap": "false", }, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { a.Logger.Info("Running migrations...") cfg, err := config.Get() if err != nil { - a.Logger.Fatal("Error fetching the config.", "error", err) + return err } db, err := postgres.NewDB(cfg, a.Logger) if err != nil { - a.Logger.Fatal(err) + return err } defer db.Close() m := migrator.NewWithLogger(db, a.Logger) err = m.Up() if err != nil { - a.Logger.Fatalf("migration up failed with error: %+v", err) + return err } a.Logger.Info("Migration completed successfully.") - os.Exit(0) + return nil }, } @@ -75,29 +75,28 @@ func addDownCommand(a *cli.App) *cobra.Command { "CheckMigration": "false", "ShouldBootstrap": "false", }, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { a.Logger.Info("Rolling back migrations...") cfg, err := config.Get() if err != nil { - a.Logger.Fatal("Error fetching the config.", "error", err) + return err } db, err := postgres.NewDB(cfg, a.Logger) if err != nil { - a.Logger.Fatal(err) 
+ return err } defer db.Close() m := migrator.NewWithLogger(db, a.Logger) err = m.Down(maxMigrations) if err != nil { - a.Logger.Fatalf("migration down failed with error: %+v", err) + return err } a.Logger.Info("Migration completed successfully.") - - os.Exit(0) + return nil }, } @@ -114,13 +113,13 @@ func addCreateCommand(a *cli.App) *cobra.Command { "CheckMigration": "false", "ShouldBootstrap": "false", }, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { a.Logger.Info("Creating new migration file...") fileName := fmt.Sprintf("sql/%v.sql", time.Now().Unix()) f, err := os.Create(fileName) if err != nil { - a.Logger.Fatal(err) + return err } defer f.Close() @@ -129,11 +128,12 @@ func addCreateCommand(a *cli.App) *cobra.Command { for _, line := range lines { _, err := f.WriteString(line + "\n\n") if err != nil { - a.Logger.Fatal(err) + return err } } a.Logger.Infof("Created migration: %s", fileName) + return nil }, }