diff --git a/.gitignore b/.gitignore index b0fa4ecb..84ea7826 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.pem *.generated cmd/wfx-viewer/wfx-viewer +cmd/wfx-loadtest/wfx-loadtest /public /result /dist diff --git a/.prettierignore b/.prettierignore index 24f0df05..c5c219ab 100644 --- a/.prettierignore +++ b/.prettierignore @@ -9,4 +9,4 @@ public test/test_helper/**/* hugo/**/*.html **/build -ui/priv/** +ui/dist/** diff --git a/CHANGELOG.md b/CHANGELOG.md index 369a08e7..e2c45876 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,11 +7,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Breaking + +- Omit pagination metadata from responses by default. To restore the previous behavior, append the query parameter `pagination=true` to the `/jobs` and `/workflows` endpoints. + ### Added +- Log all SQL queries when `--log-level` is set to `trace` +- wfx-loadtest: add `populate` sub-command to seed a database with sample data + +### Removed + +- Retry mechanism for storage initialization +- Retry loop when creating network listeners + ### Changed - Distro-compliant package naming +- Querying jobs by tags is about 3x faster (e.g. ~1s instead of ~3s for 1 million jobs) ### Fixed diff --git a/api/wfx.go b/api/wfx.go index 4d60ce96..a5ef97eb 100644 --- a/api/wfx.go +++ b/api/wfx.go @@ -131,6 +131,9 @@ func (server WfxServer) GetJobs(ctx context.Context, request api.GetJobsRequestO if request.Params.ParamLimit != nil { pagination.Limit = *request.Params.ParamLimit } + if request.Params.ParamPagination != nil { + pagination.ComputeTotal = *request.Params.ParamPagination + } jobs, err := job.QueryJobs(ctx, server.storage, filter, pagination, (*string)(request.Params.ParamSort)) if err != nil { @@ -402,6 +405,10 @@ func (server WfxServer) GetWorkflows(ctx context.Context, request api.GetWorkflo limit = *request.Params.ParamLimit } pagination := persistence.PaginationParams{Offset: offset, Limit: limit} + if request.Params.ParamPagination != nil { + pagination.ComputeTotal = *request.Params.ParamPagination + } + log := logging.LoggerFromCtx(ctx) workflows, err := workflow.QueryWorkflows(ctx, server.storage, pagination, (*string)(request.Params.ParamSort)) if err != nil { diff --git a/cmd/wfx-loadtest/cmd/populate/main.go b/cmd/wfx-loadtest/cmd/populate/main.go new file mode 100644 index 00000000..4fa49e43 --- /dev/null +++ b/cmd/wfx-loadtest/cmd/populate/main.go @@ -0,0 +1,48 @@ +package populate + +import ( + "fmt" + "runtime" + "slices" + "strings" + + "github.com/Southclaws/fault" + "github.com/knadh/koanf/v2" + "github.com/siemens/wfx/cmd/wfx/cmd/config" + "github.com/siemens/wfx/persistence" + "github.com/spf13/cobra" +) + +var ( + flagCount = "count" + flagWorkers = "workers" +) + +func NewCommand(k *koanf.Koanf) *cobra.Command { + cmd := &cobra.Command{ + Use: "populate", + Short: "Populate database with jobs", + RunE: func(cmd *cobra.Command, args []string) error { + return fault.Wrap(run(cmd, k)) + }, + } + + f := cmd.Flags() + f.Int(flagCount, 1000, "number of jobs to create") + f.Int(flagWorkers, runtime.NumCPU(), "number of concurrent workers") + + supportedStorages := persistence.Storages() + defaultStorage := supportedStorages[0] + if slices.Index(supportedStorages, config.PreferedStorage) != -1 { + defaultStorage = config.PreferedStorage + } + f.String(config.StorageFlag, defaultStorage, fmt.Sprintf("persistence storage. 
one of: [%s]", strings.Join(supportedStorages, ", "))) + + var storageOpts string + if defaultStorage == config.PreferedStorage { + storageOpts = config.SqliteDefaultOpts + } + f.String(config.StorageOptFlag, storageOpts, "storage options") + + return cmd +} diff --git a/cmd/wfx-loadtest/cmd/populate/run.go b/cmd/wfx-loadtest/cmd/populate/run.go new file mode 100644 index 00000000..63c83807 --- /dev/null +++ b/cmd/wfx-loadtest/cmd/populate/run.go @@ -0,0 +1,84 @@ +package populate + +import ( + "fmt" + "math/rand" + "time" + + "github.com/Southclaws/fault" + "github.com/knadh/koanf/v2" + "github.com/rs/zerolog/log" + "github.com/siemens/wfx/cmd/wfx/cmd/config" + "github.com/siemens/wfx/generated/api" + "github.com/siemens/wfx/internal/handler/job/definition" + "github.com/siemens/wfx/workflow/dau" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +func run(cmd *cobra.Command, k *koanf.Koanf) error { + ctx := cmd.Context() + + appConfig, err := config.NewAppConfig(cmd.Flags()) + if err != nil { + return fault.Wrap(err) + } + + storage, err := appConfig.InitStorage() + if err != nil { + return fault.Wrap(err) + } + + wf := dau.DirectWorkflow() + wfExisting, err := storage.GetWorkflow(ctx, wf.Name) + if err != nil { + return fault.Wrap(err) + } + if wfExisting == nil { + if _, err := storage.CreateWorkflow(ctx, wf); err != nil { + return fault.Wrap(err) + } + log.Info().Str("name", wf.Name).Msg("Created workflow") + } else { + log.Info().Str("name", wf.Name).Msg("Worfklow already exists") + } + + count := k.Int(flagCount) + workers := k.Int(flagWorkers) + log.Info().Int("count", count).Int("workers", workers).Msg("Populating storage with jobs") + + tags := []string{"EUROPE_WEST", "loadtest"} + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(workers) + for i := range count { + g.Go(func() error { + now := time.Now() + clientID := fmt.Sprintf("LOAD-%d", i) + state := wf.States[rand.Intn(len(wf.States))] // pick random state + job := api.Job{ + ClientID: clientID, + Workflow: wf, + Mtime: &now, + Stime: &now, + Status: &api.JobStatus{ + ClientID: clientID, + State: state.Name, + }, + Definition: map[string]any{}, + Tags: &tags, + History: &[]api.History{}, + } + job.Status.DefinitionHash = definition.Hash(&job) + _, err := storage.CreateJob(ctx, &job) + return fault.Wrap(err) + }) + } + if err := g.Wait(); err != nil { + return fault.Wrap(err) + } + + log.Info().Int("count", count).Msg("Finished populating storage with jobs") + + return nil +} diff --git a/cmd/wfx-loadtest/loadtest/loadtest.go b/cmd/wfx-loadtest/loadtest/main.go similarity index 91% rename from cmd/wfx-loadtest/loadtest/loadtest.go rename to cmd/wfx-loadtest/loadtest/main.go index a5e4cfc8..1de6c7c1 100644 --- a/cmd/wfx-loadtest/loadtest/loadtest.go +++ b/cmd/wfx-loadtest/loadtest/main.go @@ -20,6 +20,7 @@ import ( "github.com/knadh/koanf/v2" "github.com/rs/zerolog/log" "github.com/siemens/wfx/cmd/wfx-loadtest/wfx" + "github.com/siemens/wfx/cmd/wfxctl/flags" "github.com/siemens/wfx/generated/api" "github.com/siemens/wfx/workflow/dau" vegeta "github.com/tsenart/vegeta/v12/lib" @@ -30,10 +31,6 @@ const ( // Threshold of data points above which series are downsampled. 
threshold = 4000 - HostFlag = "host" - PortFlag = "port" - MgmtHostFlag = "mgmt-host" - MgmtPortFlag = "mgmt-port" ReadFreqFlag = "read-freq" WriteFreqFlag = "write-freq" DurationFlag = "duration" @@ -53,10 +50,10 @@ var ( ) func Run(k *koanf.Koanf) error { - host = k.String(HostFlag) - port = k.Int(PortFlag) - mgmtHost = k.String(MgmtHostFlag) - mgmtPort = k.Int(MgmtPortFlag) + host = k.String(flags.ClientHostFlag) + port = k.Int(flags.ClientPortFlag) + mgmtHost = k.String(flags.MgmtHostFlag) + mgmtPort = k.Int(flags.MgmtPortFlag) if host == "" || mgmtHost == "" { return errors.New("host or mgmtHost not set") @@ -84,9 +81,7 @@ func Run(k *koanf.Koanf) error { readerResultChan := make(chan vegeta.Result) readerDoneChan := make(chan any) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { readTargeter := vegeta.NewStaticTargeter( vegeta.Target{ Method: http.MethodGet, @@ -103,14 +98,11 @@ func Run(k *koanf.Koanf) error { readerResultChan <- *res } readerDoneChan <- nil - }() + }) writerResultChan := make(chan vegeta.Result) writerDoneChan := make(chan any) - wg.Add(1) - go func() { - defer wg.Done() - + wg.Go(func() { attacker := newAttacker() for res := range attacker.Attack(writeTargeter, writeRate, duration, "Generate and update jobs") { // forward result to reporter @@ -151,7 +143,7 @@ func Run(k *koanf.Koanf) error { } writerDoneChan <- nil - }() + }) var metrics vegeta.Metrics p := plot.New( @@ -160,10 +152,8 @@ func Run(k *koanf.Koanf) error { plot.Label(plot.ErrorLabeler), ) - wg.Add(1) - go func() { + wg.Go(func() { // collect results - defer wg.Done() doneCounter := 0 for doneCounter < 2 { @@ -182,7 +172,7 @@ func Run(k *koanf.Koanf) error { } metrics.Close() p.Close() - }() + }) wg.Wait() if err := dumpResults(&metrics, p); err != nil { diff --git a/cmd/wfx-loadtest/root.go b/cmd/wfx-loadtest/root.go index 112e119b..bb0b0820 100644 --- a/cmd/wfx-loadtest/root.go +++ b/cmd/wfx-loadtest/root.go @@ -20,6 +20,7 @@ import ( "github.com/knadh/koanf/v2" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/siemens/wfx/cmd/wfx-loadtest/cmd/populate" "github.com/siemens/wfx/cmd/wfx-loadtest/loadtest" "github.com/siemens/wfx/cmd/wfxctl/flags" "github.com/siemens/wfx/internal/cmd/man" @@ -32,7 +33,7 @@ func NewCommand() *cobra.Command { Use: "wfx-loadtest", Short: "Run a loadtest against wfx", Example: "wfx-loadtest --duration 10s", - PreRun: func(cmd *cobra.Command, _ []string) { + PersistentPreRun: func(cmd *cobra.Command, _ []string) { envProvider := env.Provider(".", env.Opt{ Prefix: "WFX_", TransformFunc: func(k string, v string) (string, any) { @@ -62,12 +63,13 @@ func NewCommand() *cobra.Command { }, } cmd.AddCommand(man.NewCommand()) + cmd.AddCommand(populate.NewCommand(k)) f := cmd.PersistentFlags() - f.String(loadtest.HostFlag, "localhost", "host") - f.Int(loadtest.PortFlag, 8080, "port") - f.String(loadtest.MgmtHostFlag, "localhost", "management host") - f.Int(loadtest.MgmtPortFlag, 8081, "management port") + f.String(flags.ClientHostFlag, "localhost", "host") + f.Int(flags.ClientPortFlag, 8080, "port") + f.String(flags.MgmtHostFlag, "localhost", "management host") + f.Int(flags.MgmtPortFlag, 8081, "management port") f.String(flags.LogLevelFlag, "info", fmt.Sprintf("set log level. 
one of: %s,%s,%s,%s,%s,%s,%s", zerolog.TraceLevel.String(), diff --git a/cmd/wfx/cmd/config/appconfig.go b/cmd/wfx/cmd/config/appconfig.go index 4313468a..3ebd1d14 100644 --- a/cmd/wfx/cmd/config/appconfig.go +++ b/cmd/wfx/cmd/config/appconfig.go @@ -24,6 +24,7 @@ import ( "github.com/knadh/koanf/v2" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/siemens/wfx/persistence" "github.com/spf13/pflag" ) @@ -137,7 +138,7 @@ func NewAppConfig(flags *pflag.FlagSet) (*AppConfig, error) { // start watching config for _, fp := range fileProviders { - if err := fp.Watch(func(_ interface{}, err error) { + if err := fp.Watch(func(_ any, err error) { if err != nil { return } @@ -187,7 +188,7 @@ func (cfg *AppConfig) StorageOptions() string { storageOpt := cfg.flags.Lookup(StorageOptFlag) changed := storageOpt != nil && storageOpt.Changed // do not return SQLite options for non-SQLite backends - if name != preferedStorage && (!changed || cfg.storageOpts == sqliteDefaultOpts) { + if name != PreferedStorage && (!changed || cfg.storageOpts == SqliteDefaultOpts) { return "" } return cfg.storageOpts @@ -418,3 +419,20 @@ func (cfg *AppConfig) SSEGraceInterval() time.Duration { defer cfg.mutex.RUnlock() return cfg.sseGraceInterval } + +func (cfg *AppConfig) InitStorage() (persistence.Storage, error) { + name, options := cfg.Storage(), cfg.StorageOptions() + log.Debug().Str("name", name).Str("options", options).Msg("Setting up persistent storage") + + // note: storage is shared between north- and southbound API + storage := persistence.GetStorage(name) + if storage == nil { + return nil, fmt.Errorf("unknown storage %s", name) + } + log.Debug().Str("name", name).Msg("Initializing storage") + if err := storage.Initialize(options); err != nil { + return nil, fault.Wrap(err) + } + log.Info().Str("name", name).Msg("Initialized storage") + return storage, nil +} diff --git a/cmd/wfx/cmd/config/appconfig_test.go b/cmd/wfx/cmd/config/appconfig_test.go index 05b91e6f..5114ada8 100644 --- a/cmd/wfx/cmd/config/appconfig_test.go +++ b/cmd/wfx/cmd/config/appconfig_test.go @@ -22,16 +22,20 @@ import ( func TestNewAppConfig(t *testing.T) { cfg, err := NewAppConfig(NewFlagset()) - defer cfg.Stop() require.NoError(t, err) + t.Cleanup(cfg.Stop) // call all methods which do not accept arguments structValue := reflect.ValueOf(cfg) for i := 0; i < structValue.NumMethod(); i++ { method := structValue.Method(i) methodType := method.Type() + methodName := structValue.Type().Method(i).Name if methodType.NumIn() == 0 { - t.Run(methodType.Name(), func(*testing.T) { + t.Run(methodName, func(*testing.T) { + if methodName == "InitStorage" { + return + } _ = method.Call([]reflect.Value{}) }) } @@ -70,7 +74,7 @@ func TestReload(t *testing.T) { _, _ = cfgFile.Write([]byte("log-level: error")) } - for i := 0; i < 500; i++ { + for range 500 { if zerolog.GlobalLevel() == zerolog.ErrorLevel { break } diff --git a/cmd/wfx/cmd/config/flags.go b/cmd/wfx/cmd/config/flags.go index d36a62c2..a4384b1b 100644 --- a/cmd/wfx/cmd/config/flags.go +++ b/cmd/wfx/cmd/config/flags.go @@ -65,8 +65,8 @@ const ( ) const ( - preferedStorage = "sqlite" - sqliteDefaultOpts = "file:wfx.db?_fk=1&_journal=WAL" + PreferedStorage = "sqlite" + SqliteDefaultOpts = "file:wfx.db?_fk=1&_journal=WAL" // should be "short enough", i.e. shorter than the timeout for closing // connections due to inactivity (e.g. 
by the kernel in its default setting or @@ -114,14 +114,14 @@ func NewFlagset() *pflag.FlagSet { supportedStorages := persistence.Storages() defaultStorage := supportedStorages[0] - if slices.Index(supportedStorages, preferedStorage) != -1 { - defaultStorage = preferedStorage + if slices.Index(supportedStorages, PreferedStorage) != -1 { + defaultStorage = PreferedStorage } f.String(StorageFlag, defaultStorage, fmt.Sprintf("persistence storage. one of: [%s]", strings.Join(supportedStorages, ", "))) var storageOpts string - if defaultStorage == preferedStorage { - storageOpts = sqliteDefaultOpts + if defaultStorage == PreferedStorage { + storageOpts = SqliteDefaultOpts } f.String(StorageOptFlag, storageOpts, "storage options") } diff --git a/cmd/wfx/cmd/root/root.go b/cmd/wfx/cmd/root/root.go index 250bd4fe..e54e871e 100644 --- a/cmd/wfx/cmd/root/root.go +++ b/cmd/wfx/cmd/root/root.go @@ -17,7 +17,6 @@ import ( "strings" "sync" "syscall" - "time" "github.com/Southclaws/fault" "github.com/rs/zerolog/log" @@ -26,7 +25,6 @@ import ( "github.com/siemens/wfx/cmd/wfx/metadata" "github.com/siemens/wfx/internal/cmd/man" "github.com/siemens/wfx/internal/server" - "github.com/siemens/wfx/persistence" "github.com/spf13/cobra" "go.uber.org/automaxprocs/maxprocs" ) @@ -74,7 +72,7 @@ Examples of tasks are installation of firmware or other types of commands issued Str("user", username). Msg("Starting wfx") - storage, err := initStorage(cfg) + storage, err := cfg.InitStorage() if err != nil { return fault.Wrap(err) } @@ -100,10 +98,7 @@ Examples of tasks are installation of firmware or other types of commands issued } var g sync.WaitGroup - g.Add(1) - go func() { - defer g.Done() - + g.Go(func() { err := collection.Start() log.Debug().Msg("Server collection done") if err != nil { @@ -111,7 +106,7 @@ Examples of tasks are installation of firmware or other types of commands issued chErr <- err } close(chErr) - }() + }) // reset error variable err = nil @@ -141,33 +136,3 @@ Examples of tasks are installation of firmware or other types of commands issued _ = cmd.MarkPersistentFlagDirname(config.MgmtPluginsDirFlag) return cmd } - -func initStorage(cfg *config.AppConfig) (persistence.Storage, error) { - name, options := cfg.Storage(), cfg.StorageOptions() - log.Debug().Str("name", name).Str("options", options).Msg("Setting up persistent storage") - - // note: storage is shared between north- and southbound API - storage := persistence.GetStorage(name) - if storage == nil { - return nil, fmt.Errorf("unknown storage %s", name) - } - var err error - for i := 0; i < 300; i++ { - log.Debug().Str("name", name).Msg("Initializing storage") - err = storage.Initialize(options) - if err == nil { - log.Info().Str("name", name).Msg("Initialized storage") - break - } - dur := time.Second - log.Warn(). - Err(err). - Str("storage", name). - Msg("Failed to initialize persistent storage. 
Trying again in one second...") - time.Sleep(dur) - } - if err != nil { - return nil, fault.Wrap(err) - } - return storage, nil -} diff --git a/generated/api/wfx.openapi.gen.go b/generated/api/wfx.openapi.gen.go index a2dd90b1..dc07ee1d 100644 --- a/generated/api/wfx.openapi.gen.go +++ b/generated/api/wfx.openapi.gen.go @@ -272,14 +272,14 @@ type JobStatus struct { // PaginatedJobList Paginated list of jobs type PaginatedJobList struct { - Content []Job `json:"content"` - Pagination Pagination `json:"pagination"` + Content []Job `json:"content"` + Pagination *Pagination `json:"pagination,omitempty"` } // PaginatedWorkflowList Paginated list of workflows type PaginatedWorkflowList struct { - Content []Workflow `json:"content"` - Pagination Pagination `json:"pagination"` + Content []Workflow `json:"content"` + Pagination *Pagination `json:"pagination,omitempty"` } // Pagination defines model for Pagination. @@ -342,6 +342,9 @@ type paramLimit = int32 // paramOffset defines model for offset. type paramOffset = int64 +// paramPagination defines model for pagination. +type paramPagination = bool + // ResponseFilter defines model for responseFilter. type ResponseFilter = string @@ -377,6 +380,9 @@ type GetJobsParams struct { // ParamTag A list of tags ParamTag *paramTag `form:"tag,omitempty" json:"tag,omitempty"` + // ParamPagination If true, pagination metadata will be included in the response + ParamPagination *paramPagination `form:"pagination,omitempty" json:"pagination,omitempty"` + // ParamWorkflow Filter jobs matching by workflow ParamWorkflow *string `form:"workflow,omitempty" json:"workflow,omitempty"` @@ -479,6 +485,9 @@ type GetWorkflowsParams struct { // ParamSort the order of returned elements ParamSort *paramSort `form:"sort,omitempty" json:"sort,omitempty"` + // ParamPagination If true, pagination metadata will be included in the response + ParamPagination *paramPagination `form:"pagination,omitempty" json:"pagination,omitempty"` + // XResponseFilter Apply a jq-like filter to the response XResponseFilter *ResponseFilter `json:"X-Response-Filter,omitempty"` } @@ -1102,6 +1111,22 @@ func NewGetJobsRequest(server string, params *GetJobsParams) (*http.Request, err } + if params.ParamPagination != nil { + + if queryFrag, err := runtime.StyleParamWithOptions("form", true, "pagination", *params.ParamPagination, runtime.StyleParamOptions{ParamLocation: runtime.ParamLocationQuery, Type: "boolean", Format: ""}); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + if params.ParamWorkflow != nil { if queryFrag, err := runtime.StyleParamWithOptions("form", true, "workflow", *params.ParamWorkflow, runtime.StyleParamOptions{ParamLocation: runtime.ParamLocationQuery, Type: "string", Format: ""}); err != nil { @@ -1909,6 +1934,22 @@ func NewGetWorkflowsRequest(server string, params *GetWorkflowsParams) (*http.Re } + if params.ParamPagination != nil { + + if queryFrag, err := runtime.StyleParamWithOptions("form", true, "pagination", *params.ParamPagination, runtime.StyleParamOptions{ParamLocation: runtime.ParamLocationQuery, Type: "boolean", Format: ""}); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + queryURL.RawQuery = queryValues.Encode() } @@ 
-3578,6 +3619,14 @@ func (siw *ServerInterfaceWrapper) GetJobs(w http.ResponseWriter, r *http.Reques return } + // ------------- Optional query parameter "pagination" ------------- + + err = runtime.BindQueryParameterWithOptions("form", true, false, "pagination", r.URL.Query(), ¶ms.ParamPagination, runtime.BindQueryParameterOptions{Type: "boolean", Format: ""}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "pagination", Err: err}) + return + } + // ------------- Optional query parameter "workflow" ------------- err = runtime.BindQueryParameterWithOptions("form", true, false, "workflow", r.URL.Query(), ¶ms.ParamWorkflow, runtime.BindQueryParameterOptions{Type: "string", Format: ""}) @@ -4188,6 +4237,14 @@ func (siw *ServerInterfaceWrapper) GetWorkflows(w http.ResponseWriter, r *http.R return } + // ------------- Optional query parameter "pagination" ------------- + + err = runtime.BindQueryParameterWithOptions("form", true, false, "pagination", r.URL.Query(), ¶ms.ParamPagination, runtime.BindQueryParameterOptions{Type: "boolean", Format: ""}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "pagination", Err: err}) + return + } + headers := r.Header // ------------- Optional header parameter "X-Response-Filter" ------------- @@ -5856,92 +5913,93 @@ func (sh *strictHandler) GetWorkflowsName(w http.ResponseWriter, r *http.Request // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9e3PbtrL4V8Hwd2aazE+S9bJk6f7lxkqqjOvkxurpmRPl9oDkUoJDASwAWlYz+u53", - "8OBT1Muv27Se6TSWCAK7i33vAvrmeGwRMQpUCmf4zYkwxwuQwPUnLyRA5dhXf/sgPE4iSRh1hs5bEkrg", - "6Ia5ArkQMjojdIYkQxiJCDwSEA+Zt9GSyDlKZ6o5RL3/ewx85dQcihfgDJ3cY+HNYYHVinIVqWdCckJn", - "zrrm3NVnrG7f0IC+Ma9dqIczzuJoD6BYgI8YRXIOSI9Xf60Q5oAIdWoO3EUh88EZBjgUUA2qWScPJ5Gw", - "EJUA2y8w53ilPgu5CtUXAeMLpwKfd3rudc2ZEyEZX22i8yNjIWCKghBrchPqhbEPGiPJMRVEDUT2fcQC", - "/eSGuVsInyxUQXfXLFVJ+J/sa+uaE5IFkZuAqmUX+I4s4gWi8cIFroDRpFJwc5Axp1uAMlPmQfIhwHEo", - "nWGrWdPUw9IZOoTKTttJyUyohBnwSoAv9ZTrmsOCQMAWeCvgFF9JhFwIGAckJObSsrmBH3EQcSjFFjzs", - "WpWIlPDodQ/D44OZcl1zOIiIUQGGwTcROo+icIUwuvm9HpKvgAIjCJJpjkjeTiCfA/aBZ6D/q/7Jjqjb", - "BfZIpheSBFCzkuZ3xreQmnHfUNoQEnwEISy0FqqmpZ4qD8M/OATO0Pl/J5kCOzFPxck143JE40UlCdVD", - "I4xYwhEKw4s5V/pMv4ducRjDNlj1zMepsmv9jtIZeFaxmSgkQmpxxjNxoKZSMx1KsQmeXRIhD1FSEzxz", - "1utkYq34zj0Fp6b48JsD+t/Pzvjnn0cX4/PJyKk5v56PJ86X2iYhzm8xCbFLQiJXigax2MReTQwca83G", - "AhQxIYgbAsK5d/W2xAJEQ1HHQqAVtc+WStHE9CtVf20AYVFUX9aVuNeZXheH9YgpUeTOUPIY1DC2UIoh", - "kivz1brmvJmD9/WT1gKbcP8EOJRzRKiRdAV+wDjCyFNvgY/SjVBAR5xFwCUBTQHgnFVI9UQxonob6QFo", - "AULgGdQQMZrePAswCcHXlLjDi0hv55tkLUSZRBywN8duCM6jkkOkO7iL2Sr2XDE+WYCQeBFVY60eo+Uc", - "aA7PJRYI7sCLZRnbdrPdqbea9WZn0moOW6fDTvPfTk7l+lhCXU35mPhnFp+5N+BpadIMAjxjkeI2+yAx", - "CfWf2PeJWeljYcguSubZb10rUe3CzI08RiUmVKD5Fn4Mw02O1HK0gY16dXN7xjRg2SoZHoWVsMtiieSc", - "pHAY41m5zv35SFvG32PCwVcawE70pWKNUUhmSo2U1daby/HoaqJ01tt/VWsLGoehlh2jfdVcibQWd9fT", - "KnqDXD5QSQICHKkBeg+MTcbCaDj1ySiACo0ZspmmSXnaSzZDHuMcQkPw8UXV21ZhVCjZvD7ZfLNEV41Z", - "Cks2bSWh1cyJO7FJJI2oKDjSuzbdkFphgu/G5oVW+6zsa1eJ4rskQiiLYI4M33I65CJ7kGxK4vzfS2Ws", - "E8ucX+TDx9GVoh+hl0Bncu4MWxW7pj0KscsvMCOKsZgWtgTk7VHKgtCEkBVkzO+6Bj+Fpmqvf8oClzKV", - "A0LJJpG/oakTC+AX6jn4U2eIvq3Rekqr1MJCa+wNKlwohwxT39iIV+PrD2e9Zut1Zi1umKttxYL5Su58", - "9EqARO4KLYO719vMAgfsf6DhKtH3lZuyX0m9Z25ON22g9J65FXojF3dnpDLfdtvb+M86aWlQfDhbPnxz", - "Dl9ra2g7MRv1g0ii1wa6xEIiQmsoIFyxeSzRq8vx2w+vG+hcMSgiAkkeUw9L8JULRKRAoZYjBHcegC9Q", - 
"PhDFYciW4NshjSn9cYVsRFZLGSWJnYlAysCrmQPOFmnIJFBMQxDK84hC4hEZrpASEhBqqLsy/onJfbwS", - "YGLz/9hZ/4N++XSJ0izL64am5UGKLxdzZ6rvrDVol4S22jMjFUmcXyj5PTZIjy/Qq2VwV58BVZ42+K8L", - "7lSn0+zDqefWm/2uV+8O3H59MPC79VPotc46A9z12r6j4Uq0WKdXVmq75anIw0dx70O1Qqi47CjVkDkA", - "BpXHVxXqlYdh5XHQUpHHp4HOQzln8WyO9PTKafMgkjEOwxXC4RKvFFsTIUUNEfmDQAmmyAUPxwLQEpDP", - "6A8SLTGVOkMCnOCQ/AF2SkKRYGpqrETlFTRmDe3hKLjgVuH7uvFodNXR8OGh7ZLxr0HIlvte+TUZVzaA", - "xK+0eu+ZO1KobWpy7CVadc/G6/dNHK1m9O65+5rC+f0/LOhZ15wbY4j2gJkn+qF5z7LraCGwpDEr76Lq", - "eUrD1EX/NDJphYvR5Uj/cX5x8dvk/N11+l3y6ZePF+eT0W/Xk/PJL7nPF6O346vxZPzhqjIr8Z65n4xK", - "322ci7vzRpNcc3ri08/ILdDEGmitVmnPd7l/W6z7htnetKZakpUvqCDKBqMlCUPkqoAjCrEHvknRa29R", - "pxGIQBZlbQUpk0gbk2KI/RTOwQPkuYh+IsHIuqwZ2MvgrpG81/Bx3PAJN0Du9MHLTJwVLFIgtvDwtqzW", - "e+bapFU+Tt5IBO1gN1tfmRNvjoT6U3GcnTKOfJOEfHL/sdrf8BiVcCe3QV1Pq0Tvrz9cIUMwk1iPGJf5", - "JIKdqZBMELE3R1jYJJhNotQU7t5XJDn2QNQQSK/IsFOK0NQJCQWhuPWz/dCaOjX7Z3vqoC9TuiX8yETo", - "Jyzm1TuaE7O5GlRwinrdA92g+9F8a2z/yWQW7PMTRckaCrjyTRVltXtruKMIb6vZ7j4qhBFnMw6iQhre", - "2Oy6UlXJKBQB94BKk5DYKPtYr16B2awqnmxJ8F/hBSTxPIUlSuQXJWn7jGNGVxdH6gUzR5Um+IhnhCqL", - "/J65Wn9tAJaOSIP6G+aKTYWg5MH4GgfFDYnVLtUiI7PaAd7Jx2xkGd3cJLUUsJ3YJ4r5UBIku/NwOmRO", - "3Z+EGHa1IlYPLKemvNtu7paYnGDnROapqqMpWIeVPSWTOKyGQj8qw5JfodVsHrJIae+SknNasjUgVO1e", - "WlzMl3MdLLxc4cl8UtBXupfXiWa6fyYyUVaPl4m8npx/mhyp7vQsVURKnLS8cOb06i+fPnwc/fbr6HpS", - "maPOMru9TVmdpG0O9w23cuXKctFkD9WzFosHkB5s4WFvpjtfoFjXHGWm77FlipdL5D/arKUgWyj0nFX7", - "/mvOI9/J3OVS1QapU7c6b5AnOkYRCOcfP47DkrQQHV6JSLt1cvyaXz9Lx93bX0qktJS3E8CVk3RLfPBR", - "bLJ4y/tEPEXXtMgREZYSuFrvfz7j+h/n9X8364PptD6dNr78/384O6sUB9EvbXnI6NdtDnoHpTMzQTx8", - "vZziKCmZzln3gFWriyF5SHaURta56mlIPLClMBtznUfYmwNqN5pOzYl56AyduZTR8ORkuVw2sH7aYHx2", - "Yl8VJ5fjN6Or61G93Wg25nIRmgq61BueBr8jXRvXZcRb4MJwT6vR0svc1W+JIKZ66gwduFO7jfVELAKK", - "I+IMnU6jqQdHWM41gU9M8Vb9OavyFP47Bq6TjT+kdV6b/NTTmi4OFc0670CaBgkna2bSS7SbzZJ/h6Mo", - "JJ5+9eRGGAVyWF9Lsfyud6FUQQtDJFZCezRxpHNrPKbUKA7TFaWBeqO2oP6GUclZWFx/U3WO7iLCDTK7", - "hn3keGaebR+lxp02O89HkGu2APTKtAW8TkmDOSDbSPMnIUrqepU58IOcA7eZAZ3YwjTpDjL1dr3HluGQ", - "y/wVMt5iw7Q1xYsF5qs9rGwSVZ8dwWI5d1lMfafmUMaTD19KPXFWatQCJzqq2yY/pmmjFAOWC1VEIKC+", - "th/W1zZ1rvw7iNB8M65puFOoRxx8kMAXhIKPdNAyQ5mtbkzpZA4CkvX03g+nFKE6OhceUF+NF4xLxKjJ", - "+ZuHrSYCKjkBHberiUGnUTak/r2NanOtxp+rmTYbclJqd1zX9r4R2pbPvQNZ0lO5d6QwrYP7x1nLtnfg", - "zLoQewemWcADxko808O29zUusPTmahvdVd6RqmoizD0+pp0xC7a/PKF238ipVCl4FKVJhRvmahlpKJi7", - "BwGSulFZo8rnb840bjY7nvm/jipvcUh8WzKwHSpf1oc2XxYbZCpwGAdJhVk5v3Y1p6gEH4WgeyGxuBX0", - "pKJ80osZQpK0OlZDqn2p63fXNSdiVdmhc99HWOfsTEN7UbN8ZOJxVMsX4+eBkD8yf/VotM3VlCoI+565", - "SSZ/jk15JqvgZW6ndX9LEtV6TCC3QpcrKn5f4vMj9lFKegV5p6Ltm3GX+D5Q51l8izIrJ9KyQ0Cw7yv5", - "yHyIE1NO3+pKfHAlJhQRKiQ2bb/KE9D7I9KKMQft2ynN6M0xnUHOOuhGFk4kcIIbaFJwO2JJQvIHCCSA", - "3wKv6/qTAQi9ur4eva6pJTjk2mbUOlPHm8f0K/hTxxDQZ7FSGhSWIaGAXA74q/JBrpiEIZrM04MLmcDa", - "8zo+REB9tSwLEGBvjpjC578QprYMrhEBYRqDkMCSiEC5J2rTkjSa8Y/A31ymMaVvGUeWg2vKmSJ0FiY1", - "9gWZzSUKGfuK9PkK5ZVpN8nHEg/Rt2lauZs6w2kiCL+ZL6dObWpiRf1wfHU9Ob+8HF+9mzrr6ZSq/7a6", - "TqNbe0yipOVKbnzsqo8uIMmSfVnOmUhak8YXKXkYTesh+XL12BfolccWC1wXoNaS4L+2bKBTMIZijT1H", - "usThnkNSeRQV7st2fGz7EqlA5Ia5D8TCzLALhWNALeRoKiEujHgQ5PnCyaMAnzbwIJNlrYTfPNoEvKb7", - "geqECqBCkls4AhM753F4nG/SrnCGRp9V1EekFJ5Ke6T4Wcg8TJUVjgX4evBsxmGmZkrIosumiziUJApB", - "xYlW1Xo6iHK2HsrZjcl+f1nCnTS6vy4kB7womvc0F1bKv2KJD+1DcrY0DE7SDqPxRaO6qJLkoJTHYOaq", - "yBIng6617UDXasZUrZWahyoceoO17nreMD6P7JrcMHeiQ2Uc6nTlMzgn3UeE/YrJt8qZeHywr5hEeuqn", - "8ZdK7lJBK90wt65POYCfiGLSD1JofjkqCpkb5y/N9BoP6xvx1warEKp6CS709/k8S1VkYkYp260bhnba", - 
"7ffamiXKI8I6PWp1B9mMBY7TJN2tXddojgVyASgyqPqNwzzlR2TXxGB9tzxb9vG3MccBvr7ZBOPu16q9", - "+3cgk8YnpQn3sKD1Hav47/jE25Nx7P7cVnJ64EnTSjuC4Nw3usfTnOc0vo9pyvye4+IX01MhxkrQ8rL1", - "g0jk7h5ZrhnIUhCvTMxJsZN3h7ynzYVHiPxFNvmfWfgfKtH3OlO2TcpTir3IxVFykeO0e4pGvUj8KK7A", - "4Wfmk2C1c/VSbjj+DmXhfinoRxKDie2OLR4dOCAX/X8mtOkJLhF7HggRxGG4erHIf0HNc4D4H6t8NPOs", - "yvpHD2I+zIDWrTzWFVzJW++Zm1MoJZuenfnbas8t8kfY8uukAeGva8cPPUxdqQREen7yRXYOtdr3bmpJ", - "LHZG9F3WGm9dd4ut/l54/UlKxdvZ3CSnk9tWZM5OpwR9XBv9IIF8scp/b6t8b+WSs8jpHLut8VVRDErm", - "ODlaujeVK/HMlHaOyOpOTFXnL6mo7ndxzKbaskU3S//nVFLpieGKbtecVkpS38g00P2lO1xedNjulP12", - "JXBU9r5u7mDcrbe08tiV5tf16iOChO9DGz2/sCsMpCX2iwAcGh7YjoX7BgcJwXe1dEpcvjt6V4Pni7k9", - "wtxi3/9T2lrs+8bS2or+i8H9W+qb3fJ/eFfsEYZWhQXpabgDTrHZsaVLWjZM7z/tlA+UrdJ55oj8MwM1", - "S5bfth5w/thjC3vHQO6+3NOg6/udYHDWPjs97XgD6Hb6uN0N+gHunrYw9JqDVi9oP2DZ2ypEmo1O4/64", - "rA8oDWRaJ2NIzbIV+9pwnv1gWTV3FVg8IZzm3Kyt8+ATZNnRjHxP6AYD/5q/aeN7PZv1LKeNCneYVHYo", - "bt5e8iyMVTqJk9/uex3HKYC/70xO7qTYpt/2eMz1RCnX3JV/G/uZnqfOrqlywXgwz3o45yAYn+6YTq5B", - "z9wcmlzFvHXk2Hhhf7MTPTlBONCBWebvm8xU/Mk3NebIBtStYmiGpoJ4Za4v2NmMWr5FryIwsk+eoCs1", - "PQrx0pr6DK2pxzGtzXAtc1dqHdmkupVN855INY8+NM631x7rMzjJPZ3lU9CPzuTN57UC+T5Ve7snZFfG", - "fIXVX+UE9IuUH5jMS7B7cPvqssB/5gSOoae5uOYER+RkGdyd3La0t2bX2GZbRHrCyh5UJfYOXVF1lq0y", - "a6cw4uQ2uag9Ha0Jlfw+DHJXCGe3XGbn+6om1bfCpDRB5x/H6clXkf9NqpRq1VNklNw2RY7Wa01sfdFO", - "XcIiCpPLlEbZD2RUSFn6MyP6nqfiiOwXOobOoIc7fcCtoO+3m6dB4Aa41e50ul7vrNXut/u5H/Ew9jeR", - "N+VQ2mk1QT0Wh76+FNgF/XsxpmhmL74XknE1h7lRuihKRVBzj/Nwtlqe1+v3e+3moAmtU7c/wK3OWR88", - "3Dt1ce+0AKdJ2WoQFUBBIlvVx7Y21i+MKRCr1QuaeNDq4A508WkbD3qu3+63oNkeeMqlP4RYyZXpya3s", - "xN7JIu2qKLkmpNpfLkJbGlKg2Flw2se+12/6fn/g9QO3G7Ta3Z4LZ7gHzW4B2NRC6GnsjwDpfEEekF37", - "Vh6TB8Xrnrb7rUG/322e9dwenDU7rnsW9MADPDgbDKpBSbdOm2NzmELLRxGifNCxFSQzKA8TtABaQRsD", - "PjsduAPf73RP+4Neqwmds56Pe9UwacWrXQQccsD+yt6Lr28f+t8AAAD//8PwOm82cwAA", + "H4sIAAAAAAAC/+w9aXPbOLJ/BcW3VZPUk2Rdliy9T55YySjldfJizc7WRnmzINmU4FAABwAta1P6769w", + "8BR1+dpNxlVTE0sEge5G392AvjkeW0SMApXCGX5zIszxAiRw/ckLCVA59tXfPgiPk0gSRp2h85aEEji6", + "Ya5ALoSMzgidIckQRiICjwTEQ+ZttCRyjtKZag5R7/8RA185NYfiBThDJ/dYeHNYYLWiXEXqmZCc0Jmz", + "rjl39Rmr2zc0oG/Maxfq4YyzONoDKBbgI0aRnAPS49VfK4Q5IEKdmgN3Uch8cIYBDgVUg2rWycNJJCxE", + "JcD2C8w5XqnPQq5C9UXA+MKpwOednntdc+ZESMZXm+j8zFgImKIgxJrchHph7IPGSHJMBVEDkX0fsUA/", + "uWHuFsInC1XQ3TVLVRL+F/vauuaEZEHkJqBq2QW+I4t4gWi8cIErYDSpFNwcZMzpFqDMlHmQfAhwHEpn", + "2GrWNPWwdIYOobLTdlIyEyphBrwS4Es95brmsCAQsAXeCjjFVxIhFwLGAQmJubRsbuBHHEQcSrEFD7tW", + "JSIlPHrdw/D4YKZc15wIzwjFBvoyMuMASR5DDWWD0AIk9rHEaEnCELmQsI6PiBEIDiJiVMAWZHLrVSJk", + "ReYA5vmYzbSuOcmyRlA3cTmPonCFMLr5ox6Sr4ACI9CSVQI9B+wDz6D+e/2THVG3C+zRMF5IEmDNSlpu", + "Gd/CMoz7hmMMQ4CPIISF1qbVZNRT5WH4C4fAGTr/dZIp4hPzVJxcMy5HNF5UklE9NEoFSzhC8Xkx50ov", + "6/fQLQ7jbVtuZj5OJV/rd5Tuw7OKzUQhEVKrJTwTB2pcNdOhFJvg2SUR8hBlO8EzZ71OJtYK/NxTcGqK", + "D785oP/97Iz/+tfRxfh8MnJqzm/n44nzpbZJiPNbTELskpDIlaJBLDaxVxMDN+LIAhQxIYgbAsK5d/W2", + "xAJEQ1HHQqANjs+WSvRi+pWqvzaAsCiqL+tKbdWZXheH9YgplcKdoVYKzl2dLZSCi+TKfLWuOW/m4H39", + "pLXZJty/AA7lHBFqNJYCP2AcYeSpt8BH6UYooCPOIuCSgKYAcM4qpHqiGFG9jfQAtAAh8AxqiBiLZZ4F", + "mITga0rc4UWkt/NNshaiTCIO2JtjNwTnUckh0h3cxWwVe64YnyxASLyIqrFWj9FyDjSH5xILBHfgxbKM", + "bbvZ7tRbzXqzM2k1h63TYaf5DydnOnwsoa6mfEz8M8+FuTfgaWnSDAI8Y5HiNvsgMQn1n9j3iVnpY2HI", + "Lkrm2W9dK1HtwsyNPEYlJlSg+RZ+DMNNjtRytIGNerXCbNKAZatkeBRWwi6LJZJzksJhnIDKde7PR9oy", + "/hETDr7SAHaiLxVrjEIyU2qkrLbeXI5HVxOls97+vVpb0DgMtewY7avmSqS1uLueVtEb5PKBShIQ4EgN", + "0HtgbDIWRsOpT0YBVGjMkM00TcrTXrIZ8hjnEBqCjy+q3rYKo0LJ5vXJ5pslumrMUliyaSsJrWZO3IlN", + 
"ImlERSEg2LXphtQKE3w3Ni+02mflmKFKFN8lkU5ZBHNk+JbTIRfZg2RTkiDmXipjnVjm/CIfPo6uFP0I", + "vQQ6k3Nn2KrYNe1RiF1+gRlRjCm1sCUgb4+2FoQmhKwgY37XNfgpNFV7/UsWgJWpHBBKNon8DU2dWAC/", + "UM/BnzpD9G2N1lNapRYWWmNvUOFCOWSY+sZGvBpffzjrNVuvM2txw1xtKxbMV3Lno1cCJHJXaBncvd5m", + "Fjhg/wMNV4m+r9yU/UrqPXNzumkDpffMrdAbufxBRirzbbe9jf+sk5YG94ez5cM35/C1toboE7NRP4kk", + "Cm+gSywkIrSGAsIVm8cSvbocv/3wuoHOFYMiIlTARj0sVTym4k+BQi1HCO48AF+gfECNw5AtwbdDGlP6", + "8wrZQKyWMkqSAyACKQOvZg44W6Qhk0AxDUEozyMKiUdkuEJKSECooe7K+Ccmh/NKgMkx/NPO+k/066dL", + "lGaLXjc0LQ9SfLncQab6zlqDdkloqz0zUpGM+pWSP2KD9PgCvVoGd/UZUOVpg/+64E51Os0+nHpuvdnv", + "evXuwO3XBwO/Wz+FXuusM8Bdr+07Gq5Ei3V6ZaW2W56KPHwU9z5UK4SKy45SDZkDYFB5fFWhXnkYVh4H", + "LRV5fBroPJRzFs/mSE+vnDYPIhnjMFwhHC7xSrE1EVLUEJE/CZRgilzwcCwALQH5jP4k0RJTqTM9wAkO", + "yb/ATkkoEkxNjZWovILGrKE9HAUX3Cp8Xzceja46Gj48tF0y/jUI2XLfK78l48oGkPiVVu89c0cKtU1N", + "jr1Eq+7ZeP2+iaPVjN49d19TOL//hwU965pzYwzRHjDzRD80f1t2HS0EljRm5V1UPU9pmLron0YmrXAx", + "uhzpP84vLn6fnL+7Tr9LPv368eJ8Mvr9enI++TX3+WL0dnw1now/XFVmJd4z95NR6buNc3F33miSa05P", + "fPoZuQWaWAOt1Srt+S73b4t13zDbm9ZUS7LyBRVE2eA0k8khCrEHvik1aG9RpxGIQBZlbQUpk0gbk2KI", + "/RTOwQPkuYh+IsHIuqwZ2MvgrpG81/Bx3PAJN0Du9MHLTJwVXlIgtvDwtqzWe+bapFU+Tt5IBO1gN1sn", + "mhNvjoT6U3GcnTKOfJOEfHL/sdrf8BiVcCe3QV1Pq13vrz9cIUMwUyCIGJf5JIKdqZBMELE3R1jYJJhN", + "otQU7t5XJDn2QNQQSK/IsFOK0NQJCQWhuPWz/dCaOjX7Z3vqoC9TuiX8yEToFyzm1TuaE7O5GlRwinrd", + "A92g+9F8a2z/yWQW7PMTRckaCrjyTRVltXtruKMIb6vZ7j4qhBFnMw6iQhre2Oy6UlXJKBQB94BKk5DY", + "KF9Zr16B2awqAm1J8F/hBSTxPIUlSuQXJWn7jGNGVxdH6gUzR5UmsNUb8N8zV+uvDcDSEWlQf8NcsakQ", + "lDwYX+OguCGx2qWaarEOtmuCfN1pIw1kYNmJcKKLD8U62ZCHo575cf8+/O0CRUQeWPxNObTd3C0XOfHN", + "CcZT1XJTsA4r0komcVgNhX5UhiW/QqvZPGSR0nYlBfK0wGxAqNq9tISYr9U6WHi58pL5pKCvdCKvE/1z", + "/3xjopIeL994PTn/NDlSqelZqoiUuGJ5ecxpz18/ffg4+v230fWkMhOd5W97m+I5SZsy7htU5YqS5dLI", + "HqpnDSEPID3Y8sLefHa+DLGuOcoY32PLFC+XyH+08UpBtlDoOav2/bec372TucsFqQ1Sp85z3uxOdCQi", + "EM4/fhy3JGl4OrzekPYW5fg1v36WdLu3V5RIaSk7J4ArV+iW+OCj2OTqlveJa4oOaJEjIiwlcLXe/33G", + "9X+d1//RrA+m0/p02vjy339xdtYiDqJf2tiQ0a/bHPQOSlpmgnj4ejnFUVIynbPuAatWlzzykOwogKxz", + "NdKQeGALXjayOo+wNwfUbjSdmhPz0Bk6cymj4cnJcrlsYP20wfjsxL4qTi7Hb0ZX16N6u9FszOUiNHVy", + "qTc8DXFHugKui4W3wIXhnlajpZe5q98SQUyN1Bk6cKd2G+uJWAQUR8QZOp1GUw+OsJxrAp+YEq36c1bl", + "KfxvDFynFH9Kq7k2xamnNb0aKmZ13oE0bRBO1rKkl2g3myWXDkdRSDz96smNMArksO6VYpFd70KpThaG", + "SKyE9mjiSGfQeEypURym90kD9UZtQf0No5KzsLj+puoc3UWEG2R2DfvI8cw82z5KjTttdp6PINdsAeiV", + "Kf6/TkmDOSDbLvMfQpTU9Spz4Ac5B27jf52+wjTpATJVdb3HluGQy/wVMt5iwzQvxYsF5qs9rGzSUZ8d", + "wWI5d1lMfafmUMaTD19KnW9WatQCJzp22yY/pjWjFOmVy1FEIKC+th/W1zbVrPw7iNB867Bpq1OoRxx8", + "kMAXhIJv+hlnKLPVjSmdzEFAsp7e++GUIlRH58ID6qvxgnGJGDWZffOw1URAJSego3M1MehkyYbUv7ex", + "a64x+nM102ZDTkpNjeva3jdC26C6dyBLOkD3jhSmQXD/OGvZ9g6cWRdi78A013fAWIlnhwyL8k2ju3od", + "F1h6c7Xp7irvdlU1FuYeH9PimEXjX57QFmzkWarMQdLgC75OOCmJaiiYuwcBkjpdWfPK52/ONG42O575", + "v45Bb3FIfFtGsF0rX9aHNmQWm2YqcBgHSdVZucp2NaeoMh+FoHshsbgVtKqifNKfGUKSyDpWn6p9qet3", + "1zUnYlXpo3PfR1jn8UyzflEPfWTicRTRF+MVgpA/M3/1aLTN1ZkqCPueuUl2f45NySar6mVOqnWWSxLV", + "ekwgt0KXKzR+X+LzM/ZRSnoFeaeiFZxxl/g+UOdZPJEyKyfSskNAsO8r+cg8jhNTYt/qeHxwJSYUESok", + "Nq3Aym/Q+yPSKjIH7QkqzejNMZ1Bzjro5hZOJHCCG2hScFJiSULyLxBIAL8FXtc1KQMQenV9PXpdU0tw", + "yLXSqHWmjjeP6Vfwp44hoM9ipTQoLENCAbkc8FflsVwxCUM0maeHGTKBtWeRfIiA+mpZFiDA3hwxhc//", + "IExtaVwjAsI0CyGBJRGBcmbUpiVJN+NNgb+5TGNK3zKOLAfXlOtF6CxM6u4LMptLFDL2FekzF8qH006V", + "jyUeom/TtJo3dYbTRBB+N19OndrURJb64fjqenJ+eTm+ejd11tMpVf9tdbRGt/boREnLlZz+2FUfXUCS", + "JfuynDORtCuNL1LyMJrWSPIl7LEv0CuPLRa4LkCtJcF/bdlAJ2wMxRp7jquJwz2HpBopKtyX7fjYliZS", + 
"gcgNcx+IhZlhFwrHgFrI6FRCXBjxIMjzlZVHAT5t6kEmJ1sJv3m0CXhN9wjVCRVAhSS3cAQmds7j8Djf", + "pF3hXI0+h6mPTSk8lfZI8bOQeZgqKxwL8PXg2YzDTM2UkEWXUhdxKEkUgooqrar1dMjlbD2osxuT/f6y", + "hDtpdH9dSA54UTTvaeaslK3FEh/am+RsaSKcpF1H44tGdQkmyVgpj8HMVZFTTgZda9uBrtWMqVorNRRV", + "OPQGa90JvWF8Htk1uWHuRAfWONTJzWdwTrqPCPsVk2+VM/H4YF8xifTUT+Mvldylgla6YW5dn3wAPxHF", + "pEek0BBzVBQyN85fmhc2HtY34q8NViFU9Rdc6O/zWZmqyMSMUrZbNxHttNvvtTVLlEeEdTLV6g6yGQsc", + "p0m6Wzux0RwL5AJQZFD1G4d5yo/IronB+m55tuzjb2OOA3x9swnG3a9Ve/fvQCbNUEoT7mFB6ztW8d/x", + "abon49j9Ka7kRMGTppV2BMG5b3TfpznjaXwf06j5PcfFL6anQoyVoOVl6yeRyN09slwzkKUgXpmYk2J3", + "7w55TxsOjxD5i2zy/2Thf6hE3+uc2TYpTyn2IhdHyUWO0+4pGvUi8aO4Aoe/Mp8Eq52rl3LD8XcoC/dL", + "QT+SGExsx2zxOMEBueh/m9Cmp7pE7HkgRBCH4erFIv+AmucA8T9W+WjmWZX1jx7EfJgBrVt5rCu4krfe", + "MzenUEo2PTsHuNWeW+SPsOXXSbvCj2vHDz1gXakERHqm8kV2DrXa926BSSx2RvRd1hpvXXeLrf5eeP1J", + "SsXb2dwkp5MbWGTOTqcEfVwb/SCBfLHKf26rfG/lkrPI6Ry7rfFVUQxK5jg5bro3lSvxzJR2jsjqTkxV", + "54dUVPe7TGZTbdmim6X/cyqp9BRxRW9sTislqW9k2u1+6A6XFx22O2W/XQkclb2vm3sZd+strTx2pfl1", + "vfqIIOH70EbPL+wKA2mJ/SIAh4YHtmPhvsFBQvBdLZ0Sl+/F3tXg+WJujzC32Pf/I20t9n1jaW1F/8Xg", + "/in1zW75P7wr9ghDq8KC9OzcAWfe7NjSxS0bpvdvdsoHylbp9HNE/paBmiXLb1sPOK3ssYW9kSB3h+5p", + "0PX9TjA4a5+dnna8AXQ7fdzuBv0Ad09bGHrNQasXtB+w7G0VIs1Gp3F/XNYHlAYyrZMxpGbZin1tOM9+", + "DK2auwosnhBOc27W1nnwebPsaEa+J3SDgX/LX8Xx457kyh+SepazSYUrUSr7GTcvQ3kWNiyd28kzx70O", + "7xTA33eCJ3eubNPLezxWfKIEbe7SwI39TM9qZxdduWD8nWc9ynMQjE93qCfXzmfuHk0uc946cmx8tj/Z", + "+Z+cIBzo7izzN1ZmBuHkmxpzZLvqVjE0Q1NBvDJXI+xsXS3fw1cRRtknT9DDmh6ceGlkfYZG1uOY1ubD", + "lrkbuo5sad3Kpnm/pZpHH5oVsBcn6xM7yU2f5TPTj87kzee1AvmuVns/KGTX0XyF1Y9yXvpFyg9M/SXY", + "PbjZdVngP3Nex9DTXIpzgiNysgzuTm5b2luza2yzLSI9j2WPtRJ7C6+oOvlWmeNTGHFym1z1no7WhEp+", + "YQa5K4SzezKz04BVk+obZ1KaoPOP4/ScrMj/qlVKteopMkpumyJH67Umtr7Epy5hEYXJRU2j7Cc2KqQs", + "/aESfYdUcUT2Gx9DZ9DDnT7gVtD3283TIHAD3Gp3Ol2vd9Zq99v93M+AGPubyJtyKO20mqAei0NfXyvs", + "gv7FGVNis1fnC8m4msPcSV0UpSKoucd5OFstz+v1+712c9CE1qnbH+BW56wPHu6durh3WoDTJHg1iAqg", + "IJGt6kNeG+sXxhSI1eoFTTxodXAHuvi0jQc912/3W9BsDzzl0h9CrOTS9eRed2Lve5F2VZRcQVLtLxeh", + "LQ0pUOwsOO1j3+s3fb8/8PqB2w1a7W7PhTPcg2a3AGxqIfQ09meEdHYhD8iufSuPyYPidU/b/dag3+82", + "z3puD86aHdc9C3rgAR6cDQbVoKRbp82xOXqh5aMIUT7o2AqSGZSHCVoAraCNAZ+dDtyB73e6p/1Br9WE", + "zlnPx71qmLTi1S4CDjlgf2Vv1tc3G/1/AAAA//9LzPWzQHQAAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/internal/handler/job/query.go b/internal/handler/job/query.go index 06ab8a6a..805515db 100644 --- a/internal/handler/job/query.go +++ b/internal/handler/job/query.go @@ -18,7 +18,13 @@ import ( "github.com/siemens/wfx/persistence" ) -func QueryJobs(ctx context.Context, storage persistence.Storage, filterParams persistence.FilterParams, paginationParams persistence.PaginationParams, sort *string) (*api.PaginatedJobList, error) { +func QueryJobs( + ctx context.Context, + storage persistence.Storage, + filterParams persistence.FilterParams, + paginationParams persistence.PaginationParams, + sort *string, +) (*api.PaginatedJobList, error) { log := logging.LoggerFromCtx(ctx) var sortParams persistence.SortParams diff --git a/internal/persistence/entgo/job_query.go b/internal/persistence/entgo/job_query.go index 8d165406..a562f434 100644 --- a/internal/persistence/entgo/job_query.go +++ b/internal/persistence/entgo/job_query.go @@ -10,6 +10,7 @@ package entgo import ( "context" + "time" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqljson" @@ -54,8 +55,20 @@ func (db Database) QueryJobs(ctx context.Context, 
builder.Where(job.HasWorkflowWith(workflow.Name(*filterParams.Workflow))) } - for _, t := range filterParams.Tags { - builder.Where(job.HasTagsWith(tag.Name(t))) + if len(filterParams.Tags) > 0 { + log.Debug().Strs("tags", filterParams.Tags).Msg("Adding filter") + builder.Where(func(s *sql.Selector) { + tagJobsTable := sql.Table(tag.JobsTable) + tagTable := sql.Table(tag.Table) + + s.Join(tagJobsTable).On(s.C(job.FieldID), tagJobsTable.C(tag.JobsPrimaryKey[1])) + s.Join(tagTable).On(tagJobsTable.C(tag.JobsPrimaryKey[0]), tagTable.C(tag.FieldID)) + values := make([]any, len(filterParams.Tags)) + for i, v := range filterParams.Tags { + values[i] = v + } + s.Where(sql.In(tagTable.C(tag.FieldName), values...)) + }) } // deterministic ordering @@ -64,9 +77,28 @@ func (db Database) QueryJobs(ctx context.Context, } else { builder.Order(ent.Asc(job.FieldStime)) } + builder.Unique(true) + + var result api.PaginatedJobList + + if paginationParams.ComputeTotal { + counter := builder.Clone() - // need to clone builder because it is unusable after we call `All` - counter := builder.Clone() + start := time.Now() + count, err := counter.Count(ctx) + if err != nil { + log.Error().Err(err).Msg("Failed to count jobs") + return nil, fault.Wrap(err) + } + duration := time.Since(start) + log.Debug().Dur("duration", duration).Int("count", count).Msg("Computed total number of jobs") + + result.Pagination = &api.Pagination{ + Total: int64(count), + Limit: paginationParams.Limit, + Offset: paginationParams.Offset, + } + } jobs, err := builder. Limit(int(paginationParams.Limit)). @@ -77,30 +109,9 @@ func (db Database) QueryJobs(ctx context.Context, return nil, fault.Wrap(err) } - total, err := counter.Count(ctx) - if err != nil { - log.Error().Err(err).Msg("Failed to count jobs") - return nil, fault.Wrap(err) - } - - content := make([]api.Job, 0, len(jobs)) + result.Content = make([]api.Job, 0, len(jobs)) for _, entity := range jobs { - content = append(content, convertJob(entity)) + result.Content = append(result.Content, convertJob(entity)) } - result := api.PaginatedJobList{ - Pagination: api.Pagination{ - Total: int64(total), - Limit: paginationParams.Limit, - Offset: paginationParams.Offset, - }, - Content: content, - } - - log.Debug(). - Int("total", total). - Int32("limit", paginationParams.Limit). - Int64("offset", paginationParams.Offset). 
- Msg("Fetched jobs") - return &result, nil } diff --git a/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.down.sql b/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.down.sql new file mode 100644 index 00000000..6c58968e --- /dev/null +++ b/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.down.sql @@ -0,0 +1 @@ +DROP INDEX idx_tag_jobs_job_id; diff --git a/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.up.sql b/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.up.sql new file mode 100644 index 00000000..4c5603ed --- /dev/null +++ b/internal/persistence/entgo/migrations/mysql/20260331155900_improve-tags-performance.up.sql @@ -0,0 +1,2 @@ +-- for fast lookup by job_id +CREATE INDEX idx_tag_jobs_job_id ON tag_jobs(job_id); diff --git a/internal/persistence/entgo/migrations/mysql/atlas.sum b/internal/persistence/entgo/migrations/mysql/atlas.sum index c6d483d6..eaed7f1e 100644 --- a/internal/persistence/entgo/migrations/mysql/atlas.sum +++ b/internal/persistence/entgo/migrations/mysql/atlas.sum @@ -1,5 +1,7 @@ -h1:C8op8S5XqsDiKWhrmHbOoN+9EOsjf2dcl24LUqrixXY= +h1:5NLSRTI+mKHCsixy4uTAKNYRjpu/COEt/d4gOxJcNWA= 20230404121019_initial.down.sql h1:onR7HMd1VxSjISncbfPK5pbfEWxtmVvGX0HKQjg6zl8= 20230404121019_initial.up.sql h1:tJe3j8yp8IYgAyz/uDpaLiqWDGGln9MowFPLkUfvg1w= 20231026152159_add-workflow-description.down.sql h1:qxshHjBda9oskqQarNbmlpIu8ZxNmuv8UOty1kohfJA= 20231026152159_add-workflow-description.up.sql h1:cdkAHuM8k/PHvPbG05FiNZRsLleoEs1jX5lLk0oz4os= +20260331155900_improve-tags-performance.down.sql h1:8OZEMPAPc1dabSlgVppHSOBK/ZMDU5X3gBvmipM7EEU= +20260331155900_improve-tags-performance.up.sql h1:llwCT1iJtsVbsYtgyDnsaoWZ38v4+yRjm1uO+NbOiPw= diff --git a/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.down.sql b/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.down.sql new file mode 100644 index 00000000..6c58968e --- /dev/null +++ b/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.down.sql @@ -0,0 +1 @@ +DROP INDEX idx_tag_jobs_job_id; diff --git a/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.up.sql b/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.up.sql new file mode 100644 index 00000000..33a360e8 --- /dev/null +++ b/internal/persistence/entgo/migrations/postgres/20260331155900_improve-tags-performance.up.sql @@ -0,0 +1,2 @@ +-- for fast lookup by job_id +CREATE INDEX IF NOT EXISTS idx_tag_jobs_job_id ON tag_jobs(job_id); diff --git a/internal/persistence/entgo/migrations/postgres/atlas.sum b/internal/persistence/entgo/migrations/postgres/atlas.sum index 6bfce9db..776c401a 100644 --- a/internal/persistence/entgo/migrations/postgres/atlas.sum +++ b/internal/persistence/entgo/migrations/postgres/atlas.sum @@ -1,5 +1,7 @@ -h1:nn0Gq3r0udJNI+0dgn1Cn1IT0MS9HXn6W71NYDUb8Pg= +h1:CuMTI/SvQReygQ/YIFYFdlx9VI9SbLS4Ai+KtXytVdY= 20230404121326_initial.down.sql h1:n990REnpzYtaV9tS5QVdcNvZS/wBy3jIJUdW1PBABzI= 20230404121326_initial.up.sql h1:+IeXdLdW5V9SF6Ou0hTAWHtGyLc1kCxwEWCgjdzd1Jk= 20231026152156_add-workflow-description.down.sql h1:sEeYTP1tjKZDEjxkW5ybpUMM/9J58+YFv+FRHMl0zoc= 20231026152156_add-workflow-description.up.sql h1:zig1fC9n1YW5iMdwmZVEsaxVLjEtF0z40YcY9NFDOwA= +20260331155900_improve-tags-performance.down.sql 
h1:tUXaOz+M9Io48zlcQMdeRYr4fzl7J+bO+eLkBpwqEuE= +20260331155900_improve-tags-performance.up.sql h1:kyX+w+XptnfDq4IbKsT1oj6Xs2Mo0ao6Kc6AmvXzTrE= diff --git a/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.down.sql b/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.down.sql new file mode 100644 index 00000000..6c58968e --- /dev/null +++ b/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.down.sql @@ -0,0 +1 @@ +DROP INDEX idx_tag_jobs_job_id; diff --git a/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.up.sql b/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.up.sql new file mode 100644 index 00000000..33a360e8 --- /dev/null +++ b/internal/persistence/entgo/migrations/sqlite/20260331155900_improve-tags-performance.up.sql @@ -0,0 +1,2 @@ +-- for fast lookup by job_id +CREATE INDEX IF NOT EXISTS idx_tag_jobs_job_id ON tag_jobs(job_id); diff --git a/internal/persistence/entgo/migrations/sqlite/atlas.sum b/internal/persistence/entgo/migrations/sqlite/atlas.sum index 999d7e28..7b00b456 100644 --- a/internal/persistence/entgo/migrations/sqlite/atlas.sum +++ b/internal/persistence/entgo/migrations/sqlite/atlas.sum @@ -1,5 +1,7 @@ -h1:1q31cQHflaeyjSYG0SxMhWrXAbNlId0YP0gK6qlh9EM= +h1:2crTzkdAGl3yYC0mEMbHO3W/TanPl2eXVPE0ysl+IRU= 20230404114557_initial.down.sql h1:7UnrYD76XgGymtXgk58CNsevSAl+wLpi0EPgaKHgukU= 20230404114557_initial.up.sql h1:hdUyb3CQQZWD0Zt8gViVi/DTUBqeB11snpS+n0weKEQ= 20231026152143_add-workflow-description.down.sql h1:O0ZPs3WyFOdzH31sCZKzGvebOQOwMxcJgDg8eKGaPxs= 20231026152143_add-workflow-description.up.sql h1:zhRGwdbY8WTybQl3PpMTLxrkr8dCOEKTEecvCsqy5PY= +20260331155900_improve-tags-performance.down.sql h1:wGVeXkfo8hC+KMBHmV6guY1zKsDrMmCQHk0g5cIjdsw= +20260331155900_improve-tags-performance.up.sql h1:JkV+sl6wa3nXgiAj8uuho3xK7b8IWoW5sBemKsk4kpg= diff --git a/internal/persistence/entgo/mysql.go b/internal/persistence/entgo/mysql.go index 9ead97cf..62151ee4 100644 --- a/internal/persistence/entgo/mysql.go +++ b/internal/persistence/entgo/mysql.go @@ -14,6 +14,7 @@ import ( "context" "database/sql" "embed" + "fmt" "entgo.io/ent/dialect" entsql "entgo.io/ent/dialect/sql" @@ -21,6 +22,7 @@ import ( driver "github.com/go-sql-driver/mysql" "github.com/golang-migrate/migrate/v4/database/mysql" "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/siemens/wfx/generated/ent" "github.com/siemens/wfx/persistence" @@ -91,7 +93,14 @@ func (wrapper *MySQL) Initialize(options string) error { // Create an ent.Driver from `db`. 
drv := entsql.OpenDB(dialect.MySQL, db) - client := ent.NewClient(ent.Driver(drv)) + client := ent.NewClient(ent.Driver(drv), ent.Log(func(v ...any) { + log.Logger.Trace().Str("component", "entgo").Msg(fmt.Sprint(v...)) + })) + + if zerolog.GlobalLevel() <= zerolog.TraceLevel { + // log queries + client = client.Debug() + } wrapper.Database = Database{client: client} return nil diff --git a/internal/persistence/entgo/postgres.go b/internal/persistence/entgo/postgres.go index 2226934f..a7889e32 100644 --- a/internal/persistence/entgo/postgres.go +++ b/internal/persistence/entgo/postgres.go @@ -28,6 +28,7 @@ import ( "entgo.io/ent/dialect" entsql "entgo.io/ent/dialect/sql" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/siemens/wfx/generated/ent" @@ -184,7 +185,15 @@ func (wrapper *PostgreSQL) Initialize(options string) error { // Create an ent.Driver from `db`. drv := entsql.OpenDB(dialect.Postgres, db) - client := ent.NewClient(ent.Driver(drv)) + client := ent.NewClient(ent.Driver(drv), ent.Log(func(v ...any) { + log.Logger.Trace().Str("component", "entgo").Msg(fmt.Sprint(v...)) + })) + + if zerolog.GlobalLevel() <= zerolog.TraceLevel { + // log queries + client = client.Debug() + } + wrapper.Database = Database{client: client} return nil } diff --git a/internal/persistence/entgo/sqlite.go b/internal/persistence/entgo/sqlite.go index b83f0785..6df07478 100644 --- a/internal/persistence/entgo/sqlite.go +++ b/internal/persistence/entgo/sqlite.go @@ -12,6 +12,7 @@ package entgo import ( "embed" + "fmt" "net/url" "entgo.io/ent/dialect" @@ -19,6 +20,7 @@ import ( "github.com/Southclaws/fault" "github.com/golang-migrate/migrate/v4/database/sqlite3" "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/siemens/wfx/generated/ent" "github.com/siemens/wfx/persistence" @@ -45,7 +47,15 @@ func (instance *SQLite) Initialize(dsn string) error { log.Error().Err(err).Msg("Failed opening connection to SQLite") return fault.Wrap(err) } - client := ent.NewClient(ent.Driver(drv)) + client := ent.NewClient(ent.Driver(drv), ent.Log(func(v ...any) { + log.Logger.Trace().Str("component", "entgo").Msg(fmt.Sprint(v...)) + })) + + if zerolog.GlobalLevel() <= zerolog.TraceLevel { + // log queries + client = client.Debug() + } + log.Debug().Msg("Connected to SQLite") instance.Database = Database{client: client} diff --git a/internal/persistence/entgo/workflow_query.go b/internal/persistence/entgo/workflow_query.go index 983616be..4e5ff7b1 100644 --- a/internal/persistence/entgo/workflow_query.go +++ b/internal/persistence/entgo/workflow_query.go @@ -10,6 +10,7 @@ package entgo import ( "context" + "time" "github.com/Southclaws/fault" "github.com/siemens/wfx/generated/api" @@ -46,30 +47,28 @@ func (db Database) QueryWorkflows(ctx context.Context, sortParams persistence.So return nil, fault.Wrap(err) } - total, err := counter.Count(ctx) - if err != nil { - log.Error().Err(err).Msg("Failed to count workflows") - return nil, fault.Wrap(err) - } + var result api.PaginatedWorkflowList - content := make([]api.Workflow, 0, len(workflows)) - for _, wf := range workflows { - content = append(content, convertWorkflow(wf)) - } - result := api.PaginatedWorkflowList{ - Pagination: api.Pagination{ - Total: int64(total), + if paginationParams.ComputeTotal { + start := time.Now() + count, err := counter.Count(ctx) + if err != nil { + log.Error().Err(err).Msg("Failed to count workflows") + return nil, fault.Wrap(err) + } + duration := time.Since(start) + 
log.Debug().Dur("duration", duration).Int("count", count).Msg("Computed total number of workflows") + + result.Pagination = &api.Pagination{ + Total: int64(count), Offset: paginationParams.Offset, Limit: paginationParams.Limit, - }, - Content: content, + } } - log.Debug(). - Int("total", total). - Int32("limit", paginationParams.Limit). - Int64("offset", paginationParams.Offset). - Msg("Fetched workflows") - + result.Content = make([]api.Workflow, 0, len(workflows)) + for _, wf := range workflows { + result.Content = append(result.Content, convertWorkflow(wf)) + } return &result, nil } diff --git a/internal/persistence/tests/job_query.go b/internal/persistence/tests/job_query.go index 3ad43981..e3afaf5d 100644 --- a/internal/persistence/tests/job_query.go +++ b/internal/persistence/tests/job_query.go @@ -11,7 +11,6 @@ package tests */ import ( - "context" "fmt" "strconv" "testing" @@ -37,13 +36,13 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { clientID := "42" wf := dau.DirectWorkflow() - _, err := db.CreateWorkflow(context.Background(), wf) + _, err := db.CreateWorkflow(t.Context(), wf) require.NoError(t, err) now := time.Now() tags := []string{"bar", "foo"} - firstJob, err := db.CreateJob(context.Background(), &api.Job{ + firstJob, err := db.CreateJob(t.Context(), &api.Job{ ClientID: clientID, Workflow: wf, Status: &api.JobStatus{ @@ -58,7 +57,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { assert.True(t, time.Time(*firstJob.Mtime).After(now) || time.Time(*firstJob.Mtime).Equal(now)) secondStime := now.Add(time.Second) - secondJob, err := db.CreateJob(context.Background(), &api.Job{ + secondJob, err := db.CreateJob(t.Context(), &api.Job{ ClientID: clientID, Workflow: wf, Status: &api.JobStatus{ @@ -70,7 +69,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { thirdStime := now.Add(2 * time.Second) tagsList := []string{"meh"} - thirdJob, err := db.CreateJob(context.Background(), &api.Job{ + thirdJob, err := db.CreateJob(t.Context(), &api.Job{ ClientID: clientID, Workflow: wf, Status: &api.JobStatus{ @@ -82,7 +81,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { require.NoError(t, err) { // filter by group - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, Group: []string{"OPEN"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, Group: []string{"OPEN"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -93,7 +92,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { } { // filter by group - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, Group: []string{"CLOSED"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, Group: []string{"CLOSED"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -102,7 +101,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { } { // filter by group - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, Group: []string{"OPEN", "CLOSED"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, Group: []string{"OPEN", "CLOSED"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -113,7 +112,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { } { - result, err := 
db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, State: &installedState}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, State: &installedState}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -123,7 +122,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { { // filter by name - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, Workflow: &wf.Name}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, Workflow: &wf.Name}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -131,7 +130,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { assert.Equal(t, []string{firstJob.ID, secondJob.ID, thirdJob.ID}, []string{actual[0].ID, actual[1].ID, actual[2].ID}) doesNotExist := "doesNotExist" - result, err = db.QueryJobs(context.Background(), persistence.FilterParams{ClientID: &clientID, Workflow: &doesNotExist}, + result, err = db.QueryJobs(t.Context(), persistence.FilterParams{ClientID: &clientID, Workflow: &doesNotExist}, sortAsc, defaultPaginationParams) actual = result.Content require.NoError(t, err) @@ -140,7 +139,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { // filter by tags { - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{Tags: []string{"foo"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{Tags: []string{"foo"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -148,7 +147,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { assert.Equal(t, firstJob.ID, actual[0].ID) } { - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{Tags: []string{"bar"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{Tags: []string{"bar"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -156,7 +155,7 @@ func TestQueryJobsFilter(t *testing.T, db persistence.Storage) { assert.Equal(t, firstJob.ID, actual[0].ID) } { - result, err := db.QueryJobs(context.Background(), persistence.FilterParams{Tags: []string{"foo", "bar"}}, + result, err := db.QueryJobs(t.Context(), persistence.FilterParams{Tags: []string{"foo", "bar"}}, sortAsc, defaultPaginationParams) actual := result.Content require.NoError(t, err) @@ -173,12 +172,12 @@ func TestGetJobsSorted(t *testing.T, db persistence.Storage) { { tmp := newValidJob(clientID) - _, err := db.CreateWorkflow(context.Background(), tmp.Workflow) + _, err := db.CreateWorkflow(t.Context(), tmp.Workflow) require.NoError(t, err) stime := time.Now().Add(-2 * time.Minute) tmp.Stime = &stime - first, err = db.CreateJob(context.Background(), tmp) + first, err = db.CreateJob(t.Context(), tmp) require.NoError(t, err) } @@ -186,19 +185,19 @@ func TestGetJobsSorted(t *testing.T, db persistence.Storage) { tmp := newValidJob(clientID) mtime := time.Now().Add(-time.Minute) tmp.Mtime = &mtime - second, err = db.CreateJob(context.Background(), tmp) + second, err = db.CreateJob(t.Context(), tmp) require.NoError(t, err) } { tmp := newValidJob(clientID) - third, err = db.CreateJob(context.Background(), tmp) + third, err = db.CreateJob(t.Context(), tmp) require.NoError(t, err) } { result, err := db.QueryJobs( - context.Background(), + t.Context(), persistence.FilterParams{ClientID: &clientID}, persistence.SortParams{Desc: false}, 
defaultPaginationParams, @@ -212,7 +211,7 @@ func TestGetJobsSorted(t *testing.T, db persistence.Storage) { { result, err := db.QueryJobs( - context.Background(), + t.Context(), persistence.FilterParams{ClientID: &clientID}, persistence.SortParams{Desc: true}, defaultPaginationParams, @@ -240,15 +239,15 @@ func TestGetJobMaxHistorySize(t *testing.T, db persistence.Storage) { tmp := newValidJob("foo") message := "0" tmp.Status.Message = message - _, err := db.CreateWorkflow(context.Background(), tmp.Workflow) + _, err := db.CreateWorkflow(t.Context(), tmp.Workflow) require.NoError(t, err) - job, err := db.CreateJob(context.Background(), tmp) + job, err := db.CreateJob(t.Context(), tmp) require.NoError(t, err) jobID = job.ID } { - job, err := db.GetJob(context.Background(), jobID, persistence.FetchParams{History: true}) + job, err := db.GetJob(t.Context(), jobID, persistence.FetchParams{History: true}) require.NoError(t, err) require.Empty(t, job.History) require.Equal(t, job.Status.Message, "0") @@ -256,7 +255,7 @@ func TestGetJobMaxHistorySize(t *testing.T, db persistence.Storage) { // update the job n+1 times for i := 1; i <= maxEntries+1; i++ { msg := fmt.Sprintf("%d", i) - job, err = db.UpdateJob(context.Background(), job, persistence.JobUpdate{ + job, err = db.UpdateJob(t.Context(), job, persistence.JobUpdate{ Status: &api.JobStatus{ State: job.Status.State, Message: msg, @@ -267,7 +266,7 @@ func TestGetJobMaxHistorySize(t *testing.T, db persistence.Storage) { } } - job, err := db.GetJob(context.Background(), jobID, persistence.FetchParams{History: true}) + job, err := db.GetJob(t.Context(), jobID, persistence.FetchParams{History: true}) require.NoError(t, err) require.NotNil(t, job.History) require.Len(t, *job.History, maxEntries) @@ -290,18 +289,25 @@ func TestJobsPagination(t *testing.T, db persistence.Storage) { total := 5 ids := make([]string, 0, total) - _, err := db.CreateWorkflow(context.Background(), dau.DirectWorkflow()) + _, err := db.CreateWorkflow(t.Context(), dau.DirectWorkflow()) assert.NoError(t, err) for range total { tmp := newValidJob(*filterParams.ClientID) - job, err := db.CreateJob(context.Background(), tmp) + job, err := db.CreateJob(t.Context(), tmp) require.NoError(t, err) ids = append(ids, job.ID) } { - result, err := db.QueryJobs(context.Background(), filterParams, sortAsc, persistence.PaginationParams{Offset: 0, Limit: 2}) - assert.NoError(t, err) + result, err := db.QueryJobs(t.Context(), filterParams, sortAsc, persistence.PaginationParams{Offset: 0, Limit: 2, ComputeTotal: false}) + require.NoError(t, err) + assert.Nil(t, result.Pagination) + assert.Len(t, result.Content, 2) + assert.Equal(t, []string{ids[0], ids[1]}, []string{result.Content[0].ID, result.Content[1].ID}) + } + { + result, err := db.QueryJobs(t.Context(), filterParams, sortAsc, persistence.PaginationParams{Offset: 0, Limit: 2, ComputeTotal: true}) + require.NoError(t, err) assert.Equal(t, int64(0), result.Pagination.Offset) assert.Equal(t, int32(2), result.Pagination.Limit) assert.Equal(t, int64(total), result.Pagination.Total) @@ -309,8 +315,8 @@ func TestJobsPagination(t *testing.T, db persistence.Storage) { assert.Equal(t, []string{ids[0], ids[1]}, []string{result.Content[0].ID, result.Content[1].ID}) } { - result, err := db.QueryJobs(context.Background(), filterParams, sortAsc, persistence.PaginationParams{Offset: 1, Limit: 2}) - assert.NoError(t, err) + result, err := db.QueryJobs(t.Context(), filterParams, sortAsc, persistence.PaginationParams{Offset: 1, Limit: 2, ComputeTotal: true}) 
+		require.NoError(t, err)
 		assert.Equal(t, int64(1), result.Pagination.Offset)
 		assert.Equal(t, int32(2), result.Pagination.Limit)
 		assert.Equal(t, int64(total), result.Pagination.Total)
@@ -318,8 +324,8 @@ func TestJobsPagination(t *testing.T, db persistence.Storage) {
 		assert.Equal(t, ids[1:3], []string{result.Content[0].ID, result.Content[1].ID})
 	}
 	{
-		result, err := db.QueryJobs(context.Background(), filterParams, sortAsc, persistence.PaginationParams{Offset: 2, Limit: 2})
-		assert.NoError(t, err)
+		result, err := db.QueryJobs(t.Context(), filterParams, sortAsc, persistence.PaginationParams{Offset: 2, Limit: 2, ComputeTotal: true})
+		require.NoError(t, err)
 		assert.Equal(t, int64(2), result.Pagination.Offset)
 		assert.Equal(t, int32(2), result.Pagination.Limit)
 		assert.Equal(t, int64(total), result.Pagination.Total)
@@ -327,8 +333,8 @@ func TestJobsPagination(t *testing.T, db persistence.Storage) {
 		assert.Equal(t, ids[2:4], []string{result.Content[0].ID, result.Content[1].ID})
 	}
 	{ // one past the last page
-		result, err := db.QueryJobs(context.Background(), filterParams, sortAsc, persistence.PaginationParams{Offset: 6, Limit: 2})
-		assert.NoError(t, err)
+		result, err := db.QueryJobs(t.Context(), filterParams, sortAsc, persistence.PaginationParams{Offset: 6, Limit: 2, ComputeTotal: true})
+		require.NoError(t, err)
 		assert.Equal(t, int64(6), result.Pagination.Offset)
 		assert.Equal(t, int32(2), result.Pagination.Limit)
 		assert.Equal(t, int64(total), result.Pagination.Total)
diff --git a/internal/server/jobs_test.go b/internal/server/jobs_test.go
index 7eab688b..fa92f540 100644
--- a/internal/server/jobs_test.go
+++ b/internal/server/jobs_test.go
@@ -9,7 +9,6 @@ package server
 */

 import (
-	"context"
 	"fmt"
 	"net/http"
 	"testing"
@@ -24,42 +23,75 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func TestGetJobsHandler_Group(t *testing.T) {
+func TestGetJobs(t *testing.T) {
 	db := newInMemoryDB(t)
-	workflow, err := db.CreateWorkflow(context.Background(), dau.DirectWorkflow())
+	workflow, err := db.CreateWorkflow(t.Context(), dau.DirectWorkflow())
 	require.NoError(t, err)
-	_, err = db.CreateJob(context.Background(), &api.Job{
+	_, err = db.CreateJob(t.Context(), &api.Job{
 		ClientID: "foo",
 		Status: &api.JobStatus{
 			State: "INSTALL",
 		},
 		Workflow: &api.Workflow{Name: workflow.Name},
 	})
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	north, south := createNorthAndSouth(t, db)
 	handlers := []http.Handler{north, south}
 	for i, handler := range handlers {
 		t.Run(allAPIs[i], func(t *testing.T) {
-			apitest.New().
-				Handler(handler).
-				Get("/api/wfx/v1/jobs").
-				Query("group", "OPEN").
-				Expect(t).
-				Assert(jsonpath.Len(`$.content`, 1)).
-				Status(http.StatusOK).
-				End()
-
-			apitest.New().
-				Handler(handler).
-				Get("/api/wfx/v1/jobs").
-				Query("group", "CLOSED").
-				Expect(t).
-				Assert(jsonpath.Len(`$.content`, 0)).
-				Status(http.StatusOK).
-				End()
+			t.Run("QueryByGroup", func(t *testing.T) {
+				apitest.New().
+					Handler(handler).
+					Get("/api/wfx/v1/jobs").
+					Query("group", "OPEN").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Status(http.StatusOK).
+					End()
+				apitest.New().
+					Handler(handler).
+					Get("/api/wfx/v1/jobs").
+					Query("group", "CLOSED").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 0)).
+					Status(http.StatusOK).
+					End()
+			})
+			t.Run("Pagination", func(t *testing.T) {
+				apitest.New().
+					Handler(handler).
+					Get("/api/wfx/v1/jobs").
+					Query("pagination", "true").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.Equal(`$.pagination.total`, float64(1))).
+					Assert(jsonpath.Equal(`$.pagination.limit`, float64(10))).
+					Assert(jsonpath.Equal(`$.pagination.offset`, float64(0))).
+					Status(http.StatusOK).
+					End()
+			})
+			t.Run("WithoutPagination", func(t *testing.T) {
+				apitest.New().
+					Handler(handler).
+					Get("/api/wfx/v1/jobs").
+					Query("pagination", "false").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.NotPresent(`$.pagination`)).
+					Status(http.StatusOK).
+					End()
+				apitest.New().
+					Handler(handler).
+					Get("/api/wfx/v1/jobs").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.NotPresent(`$.pagination`)).
+					Status(http.StatusOK).
+					End()
+			})
 		})
 	}
 }
@@ -68,7 +100,7 @@ func TestCreateJob(t *testing.T) {
 	db := newInMemoryDB(t)
 	north, _ := createNorthAndSouth(t, db)

-	wf, err := workflow.CreateWorkflow(context.Background(), db, dau.DirectWorkflow())
+	wf, err := workflow.CreateWorkflow(t.Context(), db, dau.DirectWorkflow())
 	require.NoError(t, err)

 	// create job using that workflow
@@ -94,7 +126,7 @@ func TestCreateJob(t *testing.T) {
 		Assert(jsonpath.Equal(`$.definition.url`, "http://localhost/update.tgz")).
 		End()

-	jobs, err := db.QueryJobs(context.Background(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Offset: 0, Limit: 1})
+	jobs, err := db.QueryJobs(t.Context(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Offset: 0, Limit: 1})
 	assert.NoError(t, err)
 	assert.Len(t, jobs.Content, 1)
 }
@@ -124,7 +156,7 @@ func TestCreateJob_Invalid(t *testing.T) {
 		Status(http.StatusBadRequest).
 		End()

-	jobs, err := db.QueryJobs(context.Background(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Offset: 0, Limit: 1})
+	jobs, err := db.QueryJobs(t.Context(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Offset: 0, Limit: 1})
 	require.NoError(t, err)
 	assert.Len(t, jobs.Content, 0)
 }
diff --git a/internal/server/server_collection.go b/internal/server/server_collection.go
index 733c6e20..0a602d55 100644
--- a/internal/server/server_collection.go
+++ b/internal/server/server_collection.go
@@ -336,16 +336,12 @@ func createListener(scheme config.Scheme, settings ListenerSettings) (net.Listener, error) {
 		return nil, fmt.Errorf("unsupported scheme: %s", scheme)
 	}
 	contextLogger := log.With().Str("network", network).Str("addr", addr).Str("scheme", scheme.String()).Logger()
-	for range 30 {
-		ln, err := net.Listen(network, addr)
-		if err == nil {
-			contextLogger.Debug().Msg("Created new listener")
-			return ln, nil
-		}
-		contextLogger.Err(err).Msg("Failed to create listener")
-		time.Sleep(time.Second)
+	ln, err := net.Listen(network, addr)
+	if err != nil {
+		return nil, fault.Wrap(err)
 	}
-	return nil, errors.New("failed to create listener")
+	contextLogger.Debug().Msg("Created new listener")
+	return ln, nil
 }

 func createMux(cfg *config.AppConfig, basePath string, registerUI bool) *http.ServeMux {
diff --git a/internal/server/workflow_test.go b/internal/server/workflow_test.go
index 9d2e2fdb..ff675348 100644
--- a/internal/server/workflow_test.go
+++ b/internal/server/workflow_test.go
@@ -9,7 +9,6 @@ package server
 */

 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -38,10 +37,10 @@ func TestGetWorkflow(t *testing.T) {
 	tmp := dau.DirectWorkflow()
 	tmp.Name = "45b68304-4a78-4f78-b4f5-776309c3616f"
-	wf, err := workflow.CreateWorkflow(context.Background(), db, tmp)
+	wf, err := workflow.CreateWorkflow(t.Context(), db, tmp)
 	require.NoError(t, err)
 	t.Cleanup(func() {
-		_ = db.DeleteWorkflow(context.Background(), wf.Name)
+		_ = db.DeleteWorkflow(t.Context(), wf.Name)
 	})

 	handlers := []http.Handler{north, south}
@@ -63,19 +62,54 @@ func TestQueryWorkflows(t *testing.T) {
 	db := newInMemoryDB(t)
 	north, south := createNorthAndSouth(t, db)

-	_, err := workflow.CreateWorkflow(context.Background(), db, dau.DirectWorkflow())
+	_, err := workflow.CreateWorkflow(t.Context(), db, dau.DirectWorkflow())
 	require.NoError(t, err)

 	handlers := []http.Handler{north, south}
 	for i, name := range allAPIs {
 		t.Run(name, func(t *testing.T) {
-			// read all
-			apitest.New().
-				Handler(handlers[i]).
-				Get("/api/wfx/v1/workflows").
-				Expect(t).
-				Status(http.StatusOK).
-				Assert(jsonpath.GreaterThan(`$.content`, 0)).
-				End()
+			t.Run("Default", func(t *testing.T) {
+				apitest.New().
+					Handler(handlers[i]).
+					Get("/api/wfx/v1/workflows").
+					Expect(t).
+					Status(http.StatusOK).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					End()
+			})
+
+			t.Run("Pagination", func(t *testing.T) {
+				apitest.New().
+					Handler(handlers[i]).
+					Get("/api/wfx/v1/workflows").
+					Query("pagination", "true").
+					Expect(t).
+					Status(http.StatusOK).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.Equal(`$.pagination.total`, float64(1))).
+					Assert(jsonpath.Equal(`$.pagination.limit`, float64(10))).
+					Assert(jsonpath.Equal(`$.pagination.offset`, float64(0))).
+					End()
+			})
+
+			t.Run("WithoutPagination", func(t *testing.T) {
+				apitest.New().
+					Handler(handlers[i]).
+					Get("/api/wfx/v1/workflows").
+					Query("pagination", "false").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.NotPresent(`$.pagination`)).
+					Status(http.StatusOK).
+					End()
+				apitest.New().
+					Handler(handlers[i]).
+					Get("/api/wfx/v1/workflows").
+					Expect(t).
+					Assert(jsonpath.Len(`$.content`, 1)).
+					Assert(jsonpath.NotPresent(`$.pagination`)).
+					Status(http.StatusOK).
+					End()
+			})
 		})
 	}
 }
@@ -88,10 +122,10 @@ func TestDeleteWorkflow(t *testing.T) {
 	name := "584802e1-3a90-483a-924f-a638e488c531"
 	tmp.Name = name
-	wf, err := workflow.CreateWorkflow(context.Background(), db, tmp)
+	wf, err := workflow.CreateWorkflow(t.Context(), db, tmp)
 	require.NoError(t, err)
 	t.Cleanup(func() {
-		_ = db.DeleteWorkflow(context.Background(), name)
+		_ = db.DeleteWorkflow(t.Context(), name)
 	})

 	// delete shall fail for south
@@ -111,7 +145,7 @@ func TestDeleteWorkflow(t *testing.T) {
 		Status(http.StatusNoContent).
 		End()

-	actual, err := db.GetWorkflow(context.Background(), wf.Name)
+	actual, err := db.GetWorkflow(t.Context(), wf.Name)
 	assert.Nil(t, actual)
 	assert.Equal(t, ftag.NotFound, ftag.Get(err))
 }
@@ -125,7 +159,7 @@ func TestCreateWorkflow(t *testing.T) {
 	wf.Name = name
 	wfJSON, _ := json.Marshal(wf)
 	t.Cleanup(func() {
-		_ = db.DeleteWorkflow(context.Background(), name)
+		_ = db.DeleteWorkflow(t.Context(), name)
 	})

 	// south is not allowed
@@ -182,15 +216,19 @@ func newInMemoryDB(t *testing.T) persistence.Storage {
 	t.Cleanup(db.Shutdown)
 	t.Cleanup(func() {
 		{
-			list, _ := db.QueryJobs(context.Background(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Limit: 100})
-			for _, job := range list.Content {
-				_ = db.DeleteJob(context.Background(), job.ID)
+			list, _ := db.QueryJobs(t.Context(), persistence.FilterParams{}, persistence.SortParams{}, persistence.PaginationParams{Limit: 100})
+			if list != nil {
+				for _, job := range list.Content {
+					_ = db.DeleteJob(t.Context(), job.ID)
+				}
 			}
 		}
 		{
-			list, _ := db.QueryWorkflows(context.Background(), persistence.SortParams{Desc: false}, persistence.PaginationParams{Limit: 100})
-			for _, wf := range list.Content {
-				_ = db.DeleteWorkflow(context.Background(), wf.Name)
+			list, _ := db.QueryWorkflows(t.Context(), persistence.SortParams{Desc: false}, persistence.PaginationParams{Limit: 100})
+			if list != nil {
+				for _, wf := range list.Content {
+					_ = db.DeleteWorkflow(t.Context(), wf.Name)
+				}
 			}
 		}
 	})
@@ -214,8 +252,8 @@ func createNorthAndSouth(t *testing.T, db persistence.Storage) (http.Handler, http.Handler) {

 func persistJob(t *testing.T, db persistence.Storage) *api.Job {
 	wf := dau.DirectWorkflow()
-	if found, _ := workflow.GetWorkflow(context.Background(), db, wf.Name); found == nil {
-		_, err := workflow.CreateWorkflow(context.Background(), db, wf)
+	if found, _ := workflow.GetWorkflow(t.Context(), db, wf.Name); found == nil {
+		_, err := workflow.CreateWorkflow(t.Context(), db, wf)
 		require.NoError(t, err)
 	}

@@ -223,7 +261,7 @@ func persistJob(t *testing.T, db persistence.Storage) *api.Job {
 		ClientID: "foo",
 		Workflow: wf.Name,
 	}
-	job, err := job.CreateJob(context.Background(), db, &jobReq)
+	job, err := job.CreateJob(t.Context(), db, &jobReq)
 	require.NoError(t, err)
 	return job
 }
diff --git a/justfile b/justfile
index 3f360c9b..d7801c01 100644
--- a/justfile
+++ b/justfile
@@ -165,7 +165,7 @@ postgres-stop: (_container-stop "wfx-postgres")

 # Start wfx and connect to Postgres database
 @postgres-wfx: postgres-start
-    ./wfx --log-level debug \
+    ./wfx --log-level trace \
         --storage postgres \
         --storage-opt "host=$PGHOST port=5432 database=$PGDATABASE user=$PGUSER password=$PGPASSWORD sslmode=disable"
@@ -228,7 +228,7 @@ mysql-generate-schema name:

 # Start wfx and connect to MySQL container.
 mysql-wfx: mysql-start
-    ./wfx --log-level debug \
+    ./wfx --log-level trace \
         --storage mysql \
         --storage-opt "$MYSQL_USER:$MYSQL_PASSWORD@/$MYSQL_DATABASE"
diff --git a/persistence/storage.go b/persistence/storage.go
index 1f5a62a3..2592e2b4 100644
--- a/persistence/storage.go
+++ b/persistence/storage.go
@@ -78,6 +78,9 @@ type PaginationParams struct {
 	Offset int64
 	// Limit is the maximum number of items to return in the response.
 	Limit int32
+	// ComputeTotal, when set to true, computes the total number of available entries.
+	// Note that this requires a separate count query and may significantly impact performance on large data sets.
+	ComputeTotal bool
 }

 // FetchParams control the level of detail returned by fetch operations.
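The new ComputeTotal field above is the storage-level switch behind the pagination query parameter: the extra count query only runs when a caller opts in. A rough usage sketch in Go, assuming the context and github.com/siemens/wfx/persistence imports and an already-initialized persistence.Storage value; the countJobs helper itself is illustrative and not part of wfx:

    func countJobs(ctx context.Context, storage persistence.Storage) (int64, error) {
        // Request a single job but ask the storage layer to also compute the total.
        result, err := storage.QueryJobs(ctx, persistence.FilterParams{}, persistence.SortParams{},
            persistence.PaginationParams{Offset: 0, Limit: 1, ComputeTotal: true})
        if err != nil {
            return 0, err
        }
        // result.Pagination is only populated when ComputeTotal is true; with the default
        // (false) it stays nil and the separate count query is skipped entirely.
        return result.Pagination.Total, nil
    }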
diff --git a/spec/wfx.openapi.yml b/spec/wfx.openapi.yml
index 236cb51e..2462058f 100644
--- a/spec/wfx.openapi.yml
+++ b/spec/wfx.openapi.yml
@@ -109,6 +109,7 @@ paths:
         - $ref: "#/components/parameters/limit"
         - $ref: "#/components/parameters/offset"
         - $ref: "#/components/parameters/sort"
+        - $ref: "#/components/parameters/pagination"
       responses:
         default:
           description: Other error with any status code and response body format.
@@ -255,6 +256,7 @@ paths:
         - $ref: "#/components/parameters/group"
         - $ref: "#/components/parameters/clientId"
        - $ref: "#/components/parameters/tag"
+        - $ref: "#/components/parameters/pagination"
         - name: workflow
           x-go-name: paramWorkflow
           in: query
@@ -807,7 +809,6 @@ components:
     PaginatedWorkflowList:
       type: object
       required:
-        - pagination
         - content
       properties:
         pagination:
@@ -1128,7 +1129,6 @@ components:
     PaginatedJobList:
      type: object
       required:
-        - pagination
         - content
       properties:
         pagination:
@@ -1332,6 +1332,15 @@ components:
       required: false
       schema:
         type: string
+    pagination:
+      name: pagination
+      x-go-name: paramPagination
+      in: query
+      description: If true, pagination metadata will be included in the response
+      required: false
+      schema:
+        type: boolean
+        default: false

 x-paths-templates:
   Errors:
diff --git a/ui/shell.nix b/ui/shell.nix
index 5ca90fb6..54e247c3 100644
--- a/ui/shell.nix
+++ b/ui/shell.nix
@@ -15,7 +15,7 @@ pkgs.mkShell {
     beamPackages.rebar3
     beamPackages.erlang
     inotify-tools
-    nodePackages.npm
+    nodejs
     bun
     tailwindcss_4
   ];
diff --git a/ui/src/app.gleam b/ui/src/app.gleam
index 77576aaa..a030fd98 100644
--- a/ui/src/app.gleam
+++ b/ui/src/app.gleam
@@ -368,7 +368,7 @@ fn get_jobs(
   let append = string_tree.append
   let url =
     string_tree.from_string(wfx_url)
-    |> append("/jobs?sort=desc&offset=")
+    |> append("/jobs?pagination=true&sort=desc&offset=")
     |> append(int.to_string({ page - 1 } * limit))
     |> append("&limit=")
     |> append(int.to_string(limit))
@@ -386,7 +386,7 @@ fn get_workflows(
   let append = string_tree.append
   let url =
     string_tree.from_string(wfx_url)
-    |> append("/workflows?sort=asc&offset=")
+    |> append("/workflows?pagination=true&sort=asc&offset=")
     |> append(int.to_string({ page - 1 } * limit))
     |> append("&limit=")
     |> append(int.to_string(limit))
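Seen from a client, the new parameter defaults to false, so pagination metadata has to be requested explicitly, which is what the UI change above now does. A minimal Go sketch of the request side, assuming the fmt, net/http, and encoding/json imports; the base URL http://localhost:8080 and the anonymous response struct are illustrative, with the field names mirroring the pagination object asserted in the tests:

    func fetchJobsPage(offset, limit int) error {
        url := fmt.Sprintf("http://localhost:8080/api/wfx/v1/jobs?pagination=true&offset=%d&limit=%d", offset, limit)
        resp, err := http.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()

        var page struct {
            Content    []json.RawMessage `json:"content"`
            Pagination *struct {
                Offset int64 `json:"offset"`
                Limit  int32 `json:"limit"`
                Total  int64 `json:"total"`
            } `json:"pagination"` // stays nil when pagination=true is not sent
        }
        if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
            return err
        }
        if page.Pagination != nil {
            fmt.Printf("got %d of %d jobs\n", len(page.Content), page.Pagination.Total)
        }
        return nil
    }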