diff --git a/README.md b/README.md index 3c02b9570ee..76b1ab20e9b 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,6 @@ Configuration mechanisms do have an order of precedence. As presented, options a As shown above, there are several ways to configure the number of simultaneous virtual users k6 will launch. There are also different ways to specify how long those virtual users will be running. For simple tests you can: - Set the test duration by the `--duration`/`-d` CLI flag (or the `K6_DURATION` environment variable and the `duration` script/JSON option). For ease of use, `duration` is specified with human readable values like `1h30m10s` - `k6 run --duration 30s script.js`, `k6 cloud -d 15m10s script.js`, `export K6_DURATION=1h`, etc. If set to `0`, k6 wouldn't stop executing the script unless the user manually stops it. - Set the total number of script iterations with the `--iterations`/`-i` CLI flag (or the `K6_ITERATIONS` environment variable and the `iterations` script/JSON option). k6 will stop executing the script whenever the **total** number of iterations (i.e. the number of iterations across all VUs) reaches the specified number. So if you have `k6 run --iterations 10 --vus 10 script.js`, then each VU would make only a single iteration. -- Set both the test duration and the total number of script iterations. In that case, k6 would stop the script execution whenever either one of the above conditions is reached first. For more complex cases, you can specify execution stages. They are a combination of `duration,target-VUs` pairs. These pairs instruct k6 to linearly ramp up, ramp down, or stay at the number of VUs specified for the period specified. Execution stages can be set via the `stages` script/JSON option as an array of `{ duration: ..., target: ... }` pairs, or with the `--stage`/`-s` CLI flags and the `K6_STAGE` environment variable via the `duration:target,duration:target...` syntax. diff --git a/cmd/archive.go b/cmd/archive.go index ede0553929c..3fe9734cd8d 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var archiveOut = "archive.tar" @@ -75,6 +76,10 @@ An archive is a fully self-contained test run, and can be executed identically e return err } + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} + } + err = r.SetOptions(conf.Options) if err != nil { return err @@ -90,11 +95,18 @@ An archive is a fully self-contained test run, and can be executed identically e }, } +func archiveCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(false)) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... 
:/ + flags.StringVarP(&archiveOut, "archive-out", "O", archiveOut, "archive output filename") + return flags +} + func init() { RootCmd.AddCommand(archiveCmd) archiveCmd.Flags().SortFlags = false - archiveCmd.Flags().AddFlagSet(optionFlagSet()) - archiveCmd.Flags().AddFlagSet(runtimeOptionFlagSet(false)) - archiveCmd.Flags().AddFlagSet(configFileFlagSet()) - archiveCmd.Flags().StringVarP(&archiveOut, "archive-out", "O", archiveOut, "archive output filename") + archiveCmd.Flags().AddFlagSet(archiveCmdFlagSet()) } diff --git a/cmd/cloud.go b/cmd/cloud.go index 1850f27d93e..89f71aaf0a6 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -36,6 +36,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" log "github.com/sirupsen/logrus" ) @@ -54,7 +55,8 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud k6 cloud script.js`[1:], Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"), RunE: func(cmd *cobra.Command, args []string) error { - _, _ = BannerColor.Fprint(stdout, Banner+"\n\n") + //TODO: disable in quiet mode? + _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", Banner) initBar := ui.ProgressBar{ Width: 60, Left: func() string { return " uploading script" }, @@ -93,6 +95,10 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud return err } + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} + } + err = r.SetOptions(conf.Options) if err != nil { return err @@ -235,10 +241,26 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud }, } +func cloudCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(false)) + + //TODO: Figure out a better way to handle the CLI flags: + // - the default value is specified in this way so we don't overwrire whatever + // was specified via the environment variable + // - global variables are not very testable... 
:/ + flags.BoolVar(&exitOnRunning, "exit-on-running", exitOnRunning, "exits when test reaches the running status") + // We also need to explicitly set the default value for the usage message here, so setting + // K6_EXIT_ON_RUNNING=true won't affect the usage message + flags.Lookup("exit-on-running").DefValue = "false" + + return flags +} + func init() { RootCmd.AddCommand(cloudCmd) cloudCmd.Flags().SortFlags = false - cloudCmd.Flags().AddFlagSet(optionFlagSet()) - cloudCmd.Flags().AddFlagSet(runtimeOptionFlagSet(false)) - cloudCmd.Flags().BoolVar(&exitOnRunning, "exit-on-running", exitOnRunning, "exits when test reaches the running status") + cloudCmd.Flags().AddFlagSet(cloudCmdFlagSet()) } diff --git a/cmd/common.go b/cmd/common.go index 0de51ed3038..d3964bce45f 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -68,6 +68,8 @@ func (w consoleWriter) Write(p []byte) (n int, err error) { return } +//TODO: refactor the CLI config so these functions aren't needed - they +// can mask errors by failing only at runtime, not at compile time func getNullBool(flags *pflag.FlagSet, key string) null.Bool { v, err := flags.GetBool(key) if err != nil { diff --git a/cmd/config.go b/cmd/config.go index b1637cf259c..139b1e01286 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -1,7 +1,7 @@ /* * * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact + * Copyright (C) 2019 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as @@ -22,34 +22,27 @@ package cmd import ( "encoding/json" - "io/ioutil" + "fmt" "os" + "path/filepath" + "strings" + + "errors" "github.com/kelseyhightower/envconfig" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/stats/cloud" "github.com/loadimpact/k6/stats/datadog" "github.com/loadimpact/k6/stats/influxdb" "github.com/loadimpact/k6/stats/kafka" "github.com/loadimpact/k6/stats/statsd/common" - "github.com/shibukawa/configdir" + log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/pflag" null "gopkg.in/guregu/null.v3" ) -const configFilename = "config.json" - -var configDirs = configdir.New("loadimpact", "k6") -var configFile = os.Getenv("K6_CONFIG") // overridden by `-c` flag! - -// configFileFlagSet returns a FlagSet that contains flags needed for specifying a config file. -func configFileFlagSet() *pflag.FlagSet { - flags := pflag.NewFlagSet("", 0) - flags.StringVarP(&configFile, "config", "c", configFile, "specify config file to read") - return flags -} - // configFlagSet returns a FlagSet with the default run configuration flags. func configFlagSet() *pflag.FlagSet { flags := pflag.NewFlagSet("", 0) @@ -59,7 +52,6 @@ func configFlagSet() *pflag.FlagSet { flags.Bool("no-usage-report", false, "don't send anonymous stats to the developers") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") - flags.AddFlagSet(configFileFlagSet()) return flags } @@ -126,41 +118,51 @@ func getConfig(flags *pflag.FlagSet) (Config, error) { }, nil } -// Reads a configuration file from disk. 
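Editor's note: the point of replacing the `*configdir.Config` handle with a plain file path plus an `afero.Fs` (see the rewritten `readDiskConfig`/`writeDiskConfig` just below) is that the config I/O becomes trivially testable against an in-memory filesystem. A hypothetical sketch of that pattern — `miniConf`, `readMiniConf`, and the paths are made up for illustration, not part of the patch:

```go
// Sketch only: exercising a path-based, afero-backed config reader against an
// in-memory filesystem, the same pattern the new tests in this patch rely on.
package cmd

import (
	"encoding/json"
	"testing"

	"github.com/spf13/afero"
	"github.com/stretchr/testify/require"
)

type miniConf struct {
	VUs int `json:"vus"`
}

func readMiniConf(fs afero.Fs, path string) (miniConf, error) {
	var c miniConf
	data, err := afero.ReadFile(fs, path)
	if err != nil {
		return c, err
	}
	return c, json.Unmarshal(data, &c)
}

func TestReadMiniConf(t *testing.T) {
	fs := afero.NewMemMapFs() // nothing touches the real disk
	require.NoError(t, afero.WriteFile(fs, "/conf/config.json", []byte(`{"vus": 7}`), 0644))

	c, err := readMiniConf(fs, "/conf/config.json")
	require.NoError(t, err)
	require.Equal(t, 7, c.VUs)
}
```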
-func readDiskConfig(fs afero.Fs) (Config, *configdir.Config, error) { - if configFile != "" { - data, err := ioutil.ReadFile(configFile) - if err != nil { - return Config{}, nil, err - } - var conf Config - err = json.Unmarshal(data, &conf) - return conf, nil, err +// Reads the configuration file from the supplied filesystem and returns it and its path. +// It will first try to see if the user explicitly specified a custom config file and will +// try to read that. If there's a custom config specified and it couldn't be read or parsed, +// an error will be returned. +// If there's no custom config specified and no file exists in the default config path, it will +// return an empty config struct, the default config location and *no* error. +func readDiskConfig(fs afero.Fs) (Config, string, error) { + realConfigFilePath := configFilePath + if realConfigFilePath == "" { + // The user didn't specify K6_CONFIG or --config, use the default path + realConfigFilePath = defaultConfigFilePath } - cdir := configDirs.QueryFolderContainsFile(configFilename) - if cdir == nil { - return Config{}, configDirs.QueryFolders(configdir.Global)[0], nil + // Try to see if the file exists in the supplied filesystem + if _, err := fs.Stat(realConfigFilePath); err != nil { + if os.IsNotExist(err) && configFilePath == "" { + // If the file doesn't exist, but it was the default config file (i.e. the user + // didn't specify anything), silence the error + err = nil + } + return Config{}, realConfigFilePath, err } - data, err := cdir.ReadFile(configFilename) + + data, err := afero.ReadFile(fs, realConfigFilePath) if err != nil { - return Config{}, cdir, err + return Config{}, realConfigFilePath, err } var conf Config err = json.Unmarshal(data, &conf) - return conf, cdir, err + return conf, realConfigFilePath, err } -// Writes configuration back to disk. -func writeDiskConfig(fs afero.Fs, cdir *configdir.Config, conf Config) error { +// Serializes the configuration to a JSON file and writes it in the supplied +// location on the supplied filesystem +func writeDiskConfig(fs afero.Fs, configPath string, conf Config) error { data, err := json.MarshalIndent(conf, "", " ") if err != nil { return err } - if configFile != "" { - return afero.WriteFile(fs, configFilename, data, 0644) + + if err := fs.MkdirAll(filepath.Dir(configPath), 0755); err != nil { + return err } - return cdir.WriteFile(configFilename, data) + + return afero.WriteFile(fs, configPath, data, 0644) } // Reads configuration variables from the environment. @@ -177,6 +179,100 @@ func readEnvConfig() (conf Config, err error) { return conf, nil } +type executionConflictConfigError string + +func (e executionConflictConfigError) Error() string { + return string(e) +} + +var _ error = executionConflictConfigError("") + +// This checks for conflicting options and turns any shortcut options (i.e. 
duration, iterations, +// stages) into the proper scheduler configuration +func buildExecutionConfig(conf Config) (Config, error) { + result := conf + switch { + case conf.Duration.Valid: + if conf.Iterations.Valid { + //TODO: make this an executionConflictConfigError in the next version + log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions") + } + + if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty + //TODO: make this an executionConflictConfigError in the next version + log.Warnf("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions") + } + + if conf.Execution != nil { + return result, executionConflictConfigError("specifying both duration and execution is not supported") + } + + if conf.Duration.Duration <= 0 { + //TODO: make this an executionConflictConfigError in the next version + log.Warnf("Specifying infinite duration in this way is deprecated and won't be supported in the future k6 versions") + } else { + ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) + ds.VUs = conf.VUs + ds.Duration = conf.Duration + ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } + + case len(conf.Stages) > 0: // stages isn't nil (not set) and isn't explicitly set to empty + if conf.Iterations.Valid { + //TODO: make this an executionConflictConfigError in the next version + log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") + } + + if conf.Execution != nil { + return conf, executionConflictConfigError("specifying both stages and execution is not supported") + } + + ds := scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName) + ds.StartVUs = conf.VUs + for _, s := range conf.Stages { + if s.Duration.Valid { + ds.Stages = append(ds.Stages, scheduler.Stage{Duration: s.Duration, Target: s.Target}) + } + } + ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + + case conf.Iterations.Valid: + if conf.Execution != nil { + return conf, executionConflictConfigError("specifying both iterations and execution is not supported") + } + // TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations? + + ds := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName) + ds.VUs = conf.VUs + ds.Iterations = conf.Iterations + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + + default: + if conf.Execution != nil { // If someone set this, regardless if its empty + //TODO: remove this warning in the next version + log.Warnf("The execution settings are not functional in this k6 release, they will be ignored") + } + + if len(conf.Execution) == 0 { // If unset or set to empty + // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config + // with 1 VU and 1 iteration. We're choosing the per-VU config, since that one could also + // be executed both locally, and in the cloud. + result.Execution = scheduler.ConfigMap{ + lib.DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(lib.DefaultSchedulerName), + } + } + } + + //TODO: validate the config; questions: + // - separately validate the duration, iterations and stages for better error messages? + // - or reuse the execution validation somehow, at the end? 
or something mixed? + // - here or in getConsolidatedConfig() or somewhere else? + + return result, nil +} + // Assemble the final consolidated configuration from all of the different sources: // - start with the CLI-provided options to get shadowed (non-Valid) defaults in there // - add the global file config options @@ -185,6 +281,7 @@ func readEnvConfig() (conf Config, err error) { // - merge the user-supplied CLI flags back in on top, to give them the greatest priority // - set some defaults if they weren't previously specified // TODO: add better validation, more explicit default values and improve consistency between formats +// TODO: accumulate all errors and differentiate between the layers? func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf Config, err error) { cliConf.Collectors.InfluxDB = influxdb.NewConfig().Apply(cliConf.Collectors.InfluxDB) cliConf.Collectors.Cloud = cloud.NewConfig().Apply(cliConf.Collectors.Cloud) @@ -205,5 +302,24 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf } conf = conf.Apply(envConf).Apply(cliConf) - return conf, nil + return buildExecutionConfig(conf) +} + +//TODO: remove ↓ +//nolint:unparam +func validateConfig(conf Config) error { + errList := conf.Validate() + if len(errList) == 0 { + return nil + } + + errMsgParts := []string{"There were problems with the specified script configuration:"} + for _, err := range errList { + errMsgParts = append(errMsgParts, fmt.Sprintf("\t- %s", err.Error())) + } + errMsg := errors.New(strings.Join(errMsgParts, "\n")) + + //TODO: actually return the error here instead of warning, so k6 aborts on config validation errors + log.Warn(errMsg) + return nil } diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go new file mode 100644 index 00000000000..fd8918aed3a --- /dev/null +++ b/cmd/config_consolidation_test.go @@ -0,0 +1,471 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ +package cmd + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/scheduler" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/afero" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + null "gopkg.in/guregu/null.v3" +) + +// A helper funcion for setting arbitrary environment variables and +// restoring the old ones at the end, usually by deferring the returned callback +//TODO: remove these hacks when we improve the configuration... we shouldn't +// have to mess with the global environment at all... 
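A usage sketch of the deferred-restore pattern this comment describes, before the helper itself (the test name and the K6_* values are hypothetical, not part of the patch):

```go
// Usage sketch only: how a test in this file is expected to call setEnv.
func TestWithTemporaryEnv(t *testing.T) {
	restore := setEnv(t, []string{"K6_VUS=5", "K6_ITERATIONS=15"})
	defer restore() // put the original environment back when the test finishes

	// ... exercise code that reads the K6_* variables here ...
}
```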
+func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { + actuallSetEnv := func(env []string, abortOnSetErr bool) { + os.Clearenv() + for _, e := range env { + val := "" + pair := strings.SplitN(e, "=", 2) + if len(pair) > 1 { + val = pair[1] + } + err := os.Setenv(pair[0], val) + if abortOnSetErr { + require.NoError(t, err) + } else if err != nil { + t.Logf( + "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", + err, pair[0], val, + ) + } + } + } + oldEnv := os.Environ() + actuallSetEnv(newEnv, true) + + return func() { + actuallSetEnv(oldEnv, false) + } +} + +func verifyOneIterPerOneVU(t *testing.T, c Config) { + // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.PerVUIteationsConfig{}, sched) + perVuIters, ok := sched.(scheduler.PerVUIteationsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) + assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) + assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) +} + +func verifySharedIters(vus, iters null.Int) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.SharedIteationsConfig{}, sched) + sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) + require.True(t, ok) + assert.Equal(t, vus, sharedIterConfig.VUs) + assert.Equal(t, iters, sharedIterConfig.Iterations) + assert.Equal(t, vus, c.VUs) + assert.Equal(t, iters, c.Iterations) + } +} + +func verifyConstLoopingVUs(vus null.Int, duration time.Duration) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched) + clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) + assert.Equal(t, vus, clvc.VUs) + assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) + assert.Equal(t, vus, c.VUs) + assert.Equal(t, types.NullDurationFrom(duration), c.Duration) + } +} + +func verifyVarLoopingVUs(startVus null.Int, stages []scheduler.Stage) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.VariableLoopingVUsConfig{}, sched) + clvc, ok := sched.(scheduler.VariableLoopingVUsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) + assert.Equal(t, startVus, clvc.StartVUs) + assert.Equal(t, startVus, c.VUs) + assert.Equal(t, stages, clvc.Stages) + assert.Len(t, c.Stages, len(stages)) + for i, s := range stages { + assert.Equal(t, s.Duration, c.Stages[i].Duration) + assert.Equal(t, s.Target, c.Stages[i].Target) + } + } +} + +// A helper function that accepts (duration in second, VUs) pairs and returns +// a valid slice of stage structs +func buildStages(durationsAndVUs ...int64) []scheduler.Stage { + l := len(durationsAndVUs) + if l%2 != 0 { + panic("wrong len") + } + result := make([]scheduler.Stage, 0, l/2) + for i := 0; i < l; i += 2 { + result = append(result, scheduler.Stage{ + Duration: types.NullDurationFrom(time.Duration(durationsAndVUs[i]) * time.Second), + Target: null.IntFrom(durationsAndVUs[i+1]), + }) + } 
+ return result +} + +func mostFlagSets() []flagSetInit { + //TODO: make this unnecessary... currently these are the only commands in which + // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ + // sigh... compromises... + result := []flagSetInit{} + for i, fsi := range []flagSetInit{runCmdFlagSet, archiveCmdFlagSet, cloudCmdFlagSet} { + i, fsi := i, fsi // go... + result = append(result, func() *pflag.FlagSet { + flags := pflag.NewFlagSet(fmt.Sprintf("superContrivedFlags_%d", i), pflag.ContinueOnError) + flags.AddFlagSet(rootCmdPersistentFlagSet()) + flags.AddFlagSet(fsi()) + return flags + }) + } + return result +} + +type file struct { + filepath, contents string +} + +func getFS(files []file) afero.Fs { + fs := afero.NewMemMapFs() + for _, f := range files { + must(afero.WriteFile(fs, f.filepath, []byte(f.contents), 0644)) // modes don't matter in the afero.MemMapFs + } + return fs +} + +func defaultConfig(jsonConfig string) afero.Fs { + return getFS([]file{{defaultConfigFilePath, jsonConfig}}) +} + +type flagSetInit func() *pflag.FlagSet + +type opts struct { + cli []string + env []string + runner *lib.Options + fs afero.Fs + + //TODO: remove this when the configuration is more reproducible and sane... + // We use a func, because initializing a FlagSet that points to variables + // actually will change those variables to their default values :| In our + // case, this happens only some of the time, for global variables that + // are configurable only via CLI flags, but not environment variables. + // + // For the rest, their default value is their current value, since that + // has been set from the environment variable. That has a bunch of other + // issues on its own, and the func() doesn't help at all, and we need to + // use the resetStickyGlobalVars() hack on top of that... + cliFlagSetInits []flagSetInit +} + +func resetStickyGlobalVars() { + //TODO: remove after fixing the config, obviously a dirty hack + exitOnRunning = false + configFilePath = "" + runType = "" + runNoSetup = false + runNoTeardown = false +} + +// Something that makes the test also be a valid io.Writer, useful for passing it +// as an output for logs and CLI flag help messages... +type testOutput struct{ *testing.T } + +func (to testOutput) Write(p []byte) (n int, err error) { + to.Logf("%s", p) + return len(p), nil +} + +var _ io.Writer = testOutput{} + +// exp contains the different events or errors we expect our test case to trigger. +// for space and clarity, we use the fact that by default, all of the struct values are false +type exp struct { + cliParseError bool + cliReadError bool + consolidationError bool + validationErrors bool + logWarning bool //TODO: remove in the next version? +} + +// A hell of a complicated test case, that still doesn't test things fully... +type configConsolidationTestCase struct { + options opts + expected exp + customValidator func(t *testing.T, c Config) +} + +func getConfigConsolidationTestCases() []configConsolidationTestCase { + I := null.IntFrom // shortcut for "Valid" (i.e. 
user-specified) ints + // This is a function, because some of these test cases actually need for the init() functions + // to be executed, since they depend on defaultConfigFilePath + return []configConsolidationTestCase{ + // Check that no options will result in 1 VU 1 iter value for execution + {opts{}, exp{}, verifyOneIterPerOneVU}, + // Verify some CLI errors + {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, + // Check if CLI shortcuts generate correct execution values + {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(I(1), I(5))}, + {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(I(2), I(6))}, + {opts{cli: []string{"-d", "123s"}}, exp{}, verifyConstLoopingVUs(null.NewInt(1, false), 123*time.Second)}, + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstLoopingVUs(I(3), 30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstLoopingVUs(I(4), 1*time.Minute)}, + { + opts{cli: []string{"--stage", "20s:10", "-s", "3m:5"}}, exp{}, + verifyVarLoopingVUs(null.NewInt(1, false), buildStages(20, 10, 180, 5)), + }, + { + opts{cli: []string{"-s", "1m6s:5", "--vus", "10"}}, exp{}, + verifyVarLoopingVUs(null.NewInt(10, true), buildStages(66, 5)), + }, + // This should get a validation error since VUs are more than the shared iterations + {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(I(10), I(6))}, + // These should emit a warning + //TODO: in next version, those should be an error + {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, + { + opts{runner: &lib.Options{ + VUs: null.IntFrom(5), + Duration: types.NullDurationFrom(44 * time.Second), + Iterations: null.IntFrom(10), + }}, exp{logWarning: true}, nil, + }, + {opts{fs: defaultConfig(`{"execution": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU}, + // Test if environment variable shortcuts are working as expected + {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(I(5), I(15))}, + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstLoopingVUs(I(10), 20*time.Second)}, + { + opts{env: []string{"K6_STAGES=2m30s:11,1h1m:100"}}, exp{}, + verifyVarLoopingVUs(null.NewInt(1, false), buildStages(150, 11, 3660, 100)), + }, + { + opts{env: []string{"K6_STAGES=100s:100,0m30s:0", "K6_VUS=0"}}, exp{}, + verifyVarLoopingVUs(null.NewInt(0, true), buildStages(100, 100, 30, 0)), + }, + // Test if JSON configs work as expected + {opts{fs: defaultConfig(`{"iterations": 77, "vus": 7}`)}, exp{}, verifySharedIters(I(7), I(77))}, + {opts{fs: defaultConfig(`wrong-json`)}, exp{consolidationError: true}, nil}, + {opts{fs: getFS(nil), cli: []string{"--config", "/my/config.file"}}, exp{consolidationError: true}, nil}, + + // Test combinations between options and levels + { + opts{ + fs: getFS([]file{{"/my/config.file", `{"vus": 8, "duration": "2m"}`}}), + 
cli: []string{"--config", "/my/config.file"}, + }, exp{}, verifyConstLoopingVUs(I(8), 120*time.Second), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 20}], "vus": 10}`), + env: []string{"K6_DURATION=15s"}, + cli: []string{"--stage", ""}, + }, + exp{}, verifyConstLoopingVUs(I(10), 15*time.Second), + }, + { + opts{ + runner: &lib.Options{VUs: null.IntFrom(5), Duration: types.NullDurationFrom(50 * time.Second)}, + cli: []string{"--iterations", "5"}, + }, + //TODO: this shouldn't be a warning in the next version, but the result will be different + exp{logWarning: true}, verifyConstLoopingVUs(I(5), 50*time.Second), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 10}]}`), + runner: &lib.Options{VUs: null.IntFrom(5)}, + }, + exp{}, + verifyVarLoopingVUs(null.NewInt(5, true), buildStages(20, 10)), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 10}]}`), + runner: &lib.Options{VUs: null.IntFrom(5)}, + env: []string{"K6_VUS=15", "K6_ITERATIONS=15"}, + }, + exp{logWarning: true}, //TODO: this won't be a warning in the next version, but the result will be different + verifyVarLoopingVUs(null.NewInt(15, true), buildStages(20, 10)), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "11s", "target": 11}]}`), + runner: &lib.Options{VUs: null.IntFrom(22)}, + env: []string{"K6_VUS=33"}, + cli: []string{"--stage", "44s:44", "-s", "55s:55"}, + }, + exp{}, + verifyVarLoopingVUs(null.NewInt(33, true), buildStages(44, 44, 55, 55)), + }, + + //TODO: test the future full overwriting of the duration/iterations/stages/execution options + { + opts{ + fs: defaultConfig(`{ + "execution": { "someKey": { + "type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, + "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc" + }}}`), + env: []string{"K6_ITERATIONS=25"}, + cli: []string{"--vus", "12"}, + }, + exp{}, verifySharedIters(I(12), I(25)), + }, + + // Just in case, verify that no options will result in the same 1 vu 1 iter config + {opts{}, exp{}, verifyOneIterPerOneVU}, + //TODO: test for differences between flagsets + //TODO: more tests in general, especially ones not related to execution parameters... + } +} + +func runTestCase( + t *testing.T, + testCase configConsolidationTestCase, + newFlagSet flagSetInit, + logHook *testutils.SimpleLogrusHook, +) { + t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) + log.SetOutput(testOutput{t}) + logHook.Drain() + + restoreEnv := setEnv(t, testCase.options.env) + defer restoreEnv() + + flagSet := newFlagSet() + defer resetStickyGlobalVars() + flagSet.SetOutput(testOutput{t}) + flagSet.PrintDefaults() + + cliErr := flagSet.Parse(testCase.options.cli) + if testCase.expected.cliParseError { + require.Error(t, cliErr) + return + } + require.NoError(t, cliErr) + + //TODO: remove these hacks when we improve the configuration... 
+ var cliConf Config + if flagSet.Lookup("out") != nil { + cliConf, cliErr = getConfig(flagSet) + } else { + opts, errOpts := getOptions(flagSet) + cliConf, cliErr = Config{Options: opts}, errOpts + } + if testCase.expected.cliReadError { + require.Error(t, cliErr) + return + } + require.NoError(t, cliErr) + + var runner lib.Runner + if testCase.options.runner != nil { + runner = &lib.MiniRunner{Options: *testCase.options.runner} + } + if testCase.options.fs == nil { + t.Logf("Creating an empty FS for this test") + testCase.options.fs = afero.NewMemMapFs() // create an empty FS if it wasn't supplied + } + + result, err := getConsolidatedConfig(testCase.options.fs, cliConf, runner) + if testCase.expected.consolidationError { + require.Error(t, err) + return + } + require.NoError(t, err) + + warnings := logHook.Drain() + if testCase.expected.logWarning { + assert.NotEmpty(t, warnings) + } else { + assert.Empty(t, warnings) + } + + validationErrors := result.Validate() + if testCase.expected.validationErrors { + assert.NotEmpty(t, validationErrors) + } else { + assert.Empty(t, validationErrors) + } + + if testCase.customValidator != nil { + testCase.customValidator(t, result) + } +} + +func TestConfigConsolidation(t *testing.T) { + // This test and its subtests shouldn't be ran in parallel, since they unfortunately have + // to mess with shared global objects (env vars, variables, the log, ... santa?) + logHook := testutils.SimpleLogrusHook{HookedLevels: []log.Level{log.WarnLevel}} + log.AddHook(&logHook) + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + for tcNum, testCase := range getConfigConsolidationTestCases() { + flagSetInits := testCase.options.cliFlagSetInits + if flagSetInits == nil { // handle the most common case + flagSetInits = mostFlagSets() + } + for fsNum, flagSet := range flagSetInits { + // I want to paralelize this, but I cannot... due to global variables and other + // questionable architectural choices... :| + testCase, flagSet := testCase, flagSet + t.Run( + fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum), + func(t *testing.T) { runTestCase(t, testCase, flagSet, &logHook) }, + ) + } + } +} diff --git a/cmd/config_test.go b/cmd/config_test.go index 7d7be3187f7..270de7fa5a4 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -1,7 +1,7 @@ /* * * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact + * Copyright (C) 2019 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as @@ -83,8 +83,6 @@ func TestConfigCmd(t *testing.T) { } } -//TODO: write end-to-end configuration tests - how the config file, in-script options, environment variables and CLI flags are parsed and interact... 
and how the end result is then propagated back into the script - func TestConfigEnv(t *testing.T) { testdata := map[struct{ Name, Key string }]map[string]func(Config){ {"Linger", "K6_LINGER"}: { diff --git a/cmd/login_cloud.go b/cmd/login_cloud.go index c3523003887..2cbc101541f 100644 --- a/cmd/login_cloud.go +++ b/cmd/login_cloud.go @@ -57,7 +57,7 @@ This will set the default token used when just "k6 run -o cloud" is passed.`, return err } - currentDiskConf, cdir, err := readDiskConfig(fs) + currentDiskConf, configPath, err := readDiskConfig(fs) if err != nil { return err } @@ -109,7 +109,7 @@ This will set the default token used when just "k6 run -o cloud" is passed.`, } currentDiskConf.Collectors.Cloud = newCloudConf - if err := writeDiskConfig(fs, cdir, currentDiskConf); err != nil { + if err := writeDiskConfig(fs, configPath, currentDiskConf); err != nil { return err } diff --git a/cmd/login_influxdb.go b/cmd/login_influxdb.go index 1ff785babe6..134e471fdaf 100644 --- a/cmd/login_influxdb.go +++ b/cmd/login_influxdb.go @@ -42,7 +42,7 @@ This will set the default server used when just "-o influxdb" is passed.`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { fs := afero.NewOsFs() - config, cdir, err := readDiskConfig(fs) + config, configPath, err := readDiskConfig(fs) if err != nil { return err } @@ -105,7 +105,7 @@ This will set the default server used when just "-o influxdb" is passed.`, } config.Collectors.InfluxDB = conf - return writeDiskConfig(fs, cdir, config) + return writeDiskConfig(fs, configPath, config) }, } diff --git a/cmd/root.go b/cmd/root.go index 521ba5c4be3..e5f5d558485 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -26,6 +26,7 @@ import ( golog "log" "os" "path/filepath" + "strings" "sync" "github.com/fatih/color" @@ -34,16 +35,23 @@ import ( "github.com/shibukawa/configdir" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) +// Version contains the current semantic version of k6. +//nolint:gochecknoglobals var Version = "0.23.1" -var Banner = ` - /\ |‾‾| /‾‾/ /‾/ - /\ / \ | |_/ / / / - / \/ \ | | / ‾‾\ - / \ | |‾\ \ | (_) | - / __________ \ |__| \__\ \___/ .io` +// Banner contains the ASCII-art banner with the k6 logo and stylized website URL +//TODO: make these into methods, only the version needs to be a variable +//nolint:gochecknoglobals +var Banner = strings.Join([]string{ + ` /\ |‾‾| /‾‾/ /‾/ `, + ` /\ / \ | |_/ / / / `, + ` / \/ \ | | / ‾‾\ `, + ` / \ | |‾\ \ | (_) | `, + ` / __________ \ |__| \__\ \___/ .io`, +}, "\n") var BannerColor = color.New(color.FgCyan) var ( @@ -54,9 +62,16 @@ var ( stderr = consoleWriter{colorable.NewColorableStderr(), stderrTTY, outMutex} ) -var ( - cfgFile string +const defaultConfigFileName = "config.json" + +//TODO: remove these global variables +//nolint:gochecknoglobals +var defaultConfigFilePath = defaultConfigFileName // Updated with the user's config folder in the init() function below +//nolint:gochecknoglobals +var configFilePath = os.Getenv("K6_CONFIG") // Overridden by `-c`/`--config` flag! +var ( + //TODO: have environment variables for configuring these? hopefully after we move away from global vars though... 
verbose bool quiet bool noColor bool @@ -68,7 +83,7 @@ var ( var RootCmd = &cobra.Command{ Use: "k6", Short: "a next-generation load generator", - Long: BannerColor.Sprint(Banner), + Long: BannerColor.Sprintf("\n%s", Banner), SilenceUsage: true, SilenceErrors: true, PersistentPreRun: func(cmd *cobra.Command, args []string) { @@ -93,20 +108,34 @@ func Execute() { } } +func rootCmdPersistentFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + flags.BoolVarP(&verbose, "verbose", "v", false, "enable debug logging") + flags.BoolVarP(&quiet, "quiet", "q", false, "disable progress updates") + flags.BoolVar(&noColor, "no-color", false, "disable colored output") + flags.StringVar(&logFmt, "logformat", "", "log output format") + flags.StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") + + //TODO: Fix... This default value needed, so both CLI flags and environment variables work + flags.StringVarP(&configFilePath, "config", "c", configFilePath, "JSON config file") + // And we also need to explicitly set the default value for the usage message here, so things + // like `K6_CONFIG="blah" k6 run -h` don't produce a weird usage message + flags.Lookup("config").DefValue = defaultConfigFilePath + must(cobra.MarkFlagFilename(flags, "config")) + return flags +} + func init() { - defaultConfigPathMsg := "" + // TODO: find a better library... or better yet, simply port the few dozen lines of code for getting the + // per-user config folder in a cross-platform way + configDirs := configdir.New("loadimpact", "k6") configFolders := configDirs.QueryFolders(configdir.Global) if len(configFolders) > 0 { - defaultConfigPathMsg = fmt.Sprintf(" (default %s)", filepath.Join(configFolders[0].Path, configFilename)) + defaultConfigFilePath = filepath.Join(configFolders[0].Path, defaultConfigFileName) } - RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "enable debug logging") - RootCmd.PersistentFlags().BoolVarP(&quiet, "quiet", "q", false, "disable progress updates") - RootCmd.PersistentFlags().BoolVar(&noColor, "no-color", false, "disable colored output") - RootCmd.PersistentFlags().StringVar(&logFmt, "logformat", "", "log output format") - RootCmd.PersistentFlags().StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") - RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file"+defaultConfigPathMsg) - must(cobra.MarkFlagFilename(RootCmd.PersistentFlags(), "config")) + RootCmd.PersistentFlags().AddFlagSet(rootCmdPersistentFlagSet()) } // fprintf panics when where's an error writing to the supplied io.Writer diff --git a/cmd/run.go b/cmd/run.go index f5b3b432302..c0fcba608ec 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -49,6 +49,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" null "gopkg.in/guregu/null.v3" ) @@ -61,9 +62,11 @@ const ( teardownTimeoutErrorCode = 101 genericTimeoutErrorCode = 102 genericEngineErrorCode = 103 + invalidConfigErrorCode = 104 ) var ( + //TODO: fix this, global variables are not very testable... 
runType = os.Getenv("K6_TYPE") runNoSetup = os.Getenv("K6_NO_SETUP") != "" runNoTeardown = os.Getenv("K6_NO_TEARDOWN") != "" @@ -97,7 +100,8 @@ a commandline interface for interacting with it.`, k6 run -o influxdb=http://1.2.3.4:8086/k6`[1:], Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"), RunE: func(cmd *cobra.Command, args []string) error { - _, _ = BannerColor.Fprint(stdout, Banner+"\n\n") + //TODO: disable in quiet mode? + _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", Banner) initBar := ui.ProgressBar{ Width: 60, @@ -160,11 +164,18 @@ a commandline interface for interacting with it.`, ) } + //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method + // If duration is explicitly set to 0, it means run forever. + //TODO: just... handle this differently, e.g. as a part of the manual executor if conf.Duration.Valid && conf.Duration.Duration == 0 { conf.Duration = types.NullDuration{} } + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} + } + // If summary trend stats are defined, update the UI to reflect them if len(conf.SummaryTrendStats) > 0 { ui.UpdateTrendColumns(conf.SummaryTrendStats) @@ -459,16 +470,35 @@ a commandline interface for interacting with it.`, }, } +func runCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(true)) + flags.AddFlagSet(configFlagSet()) + + //TODO: Figure out a better way to handle the CLI flags: + // - the default values are specified in this way so we don't overwrire whatever + // was specified via the environment variables + // - but we need to manually specify the DefValue, since that's the default value + // that will be used in the help/usage message - if we don't set it, the environment + // variables will affect the usage message + // - and finally, global variables are not very testable... :/ + flags.StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") + flags.Lookup("type").DefValue = "" + flags.BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") + falseStr := "false" // avoiding goconst warnings... + flags.Lookup("no-setup").DefValue = falseStr + flags.BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") + flags.Lookup("no-teardown").DefValue = falseStr + return flags +} + func init() { RootCmd.AddCommand(runCmd) runCmd.Flags().SortFlags = false - runCmd.Flags().AddFlagSet(optionFlagSet()) - runCmd.Flags().AddFlagSet(runtimeOptionFlagSet(true)) - runCmd.Flags().AddFlagSet(configFlagSet()) - runCmd.Flags().StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") - runCmd.Flags().BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") - runCmd.Flags().BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") + runCmd.Flags().AddFlagSet(runCmdFlagSet()) } // Reads a source file from any supported destination. 
diff --git a/converter/har/converter.go b/converter/har/converter.go index e600974f509..42c88a3467c 100644 --- a/converter/har/converter.go +++ b/converter/har/converter.go @@ -91,7 +91,7 @@ func Convert(h HAR, options lib.Options, minSleep, maxSleep uint, enableChecks b } fprint(w, "\nexport let options = {\n") - options.ForEachValid("json", func(key string, val interface{}) { + options.ForEachSpecified("json", func(key string, val interface{}) { if valJSON, err := json.MarshalIndent(val, " ", " "); err != nil { convertErr = err } else { diff --git a/js/bundle.go b/js/bundle.go index 75185c636dc..3b5de91f38e 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -224,7 +224,7 @@ func (b *Bundle) Instantiate() (bi *BundleInstance, instErr error) { } else { jsOptionsObj = jsOptions.ToObject(rt) } - b.Options.ForEachValid("json", func(key string, val interface{}) { + b.Options.ForEachSpecified("json", func(key string, val interface{}) { if err := jsOptionsObj.Set(key, val); err != nil { instErr = err } diff --git a/lib/options.go b/lib/options.go index 0b3e0d49b3e..f4a4657e401 100644 --- a/lib/options.go +++ b/lib/options.go @@ -29,12 +29,18 @@ import ( "reflect" "strings" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/pkg/errors" "gopkg.in/guregu/null.v3" ) +// DefaultSchedulerName is used as the default key/ID of the scheduler config entries +// that were created due to the use of the shortcut execution control options (i.e. duration+vus, +// iterations+vus, or stages) +const DefaultSchedulerName = "default" + // DefaultSystemTagList includes all of the system tags emitted with metrics by default. // Other tags that are not enabled by default include: iter, vu, ocsp_status, ip var DefaultSystemTagList = []string{ @@ -205,12 +211,16 @@ type Options struct { // Initial values for VUs, max VUs, duration cap, iteration cap, and stages. // See the Runner or Executor interfaces for more information. - VUs null.Int `json:"vus" envconfig:"vus"` + VUs null.Int `json:"vus" envconfig:"vus"` + + //TODO: deprecate this? or reuse it in the manual control "scheduler"? VUsMax null.Int `json:"vusMax" envconfig:"vus_max"` Duration types.NullDuration `json:"duration" envconfig:"duration"` Iterations null.Int `json:"iterations" envconfig:"iterations"` Stages []Stage `json:"stages" envconfig:"stages"` + Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"-"` + // Timeouts for the setup() and teardown() functions SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"setup_timeout"` TeardownTimeout types.NullDuration `json:"teardownTimeout" envconfig:"teardown_timeout"` @@ -310,6 +320,23 @@ func (o Options) Apply(opts Options) Options { if opts.VUsMax.Valid { o.VUsMax = opts.VUsMax } + + // Specifying duration, iterations, stages, or execution in a "higher" config tier + // will overwrite all of the the previous execution settings (if any) from any + // "lower" config tiers + // Still, if more than one of those options is simultaneously specified in the same + // config tier, they will be preserved, so the validation after we've consolidated + // all of the options can return an error. 
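To make the merge semantics described in the comment above concrete, a standalone sketch (not a test that ships with this patch; the values are arbitrary): a lower tier that sets `stages`, merged with a higher tier that sets `duration`, ends up with the higher tier's duration, a cleared `Execution`, and — in this version — the lower tier's stages still attached, which is what later produces the deprecation warning in `buildExecutionConfig()`.

```go
// Sketch of the tier-merging behaviour of Options.Apply() shown below.
package main

import (
	"fmt"
	"time"

	"github.com/loadimpact/k6/lib"
	"github.com/loadimpact/k6/lib/types"
	null "gopkg.in/guregu/null.v3"
)

func main() {
	lower := lib.Options{Stages: []lib.Stage{
		{Duration: types.NullDurationFrom(20 * time.Second), Target: null.IntFrom(10)},
	}}
	higher := lib.Options{Duration: types.NullDurationFrom(30 * time.Second)}

	merged := lower.Apply(higher)

	// Duration comes from the higher tier and any derived Execution is cleared,
	// so the consolidation step will rebuild a constant-looping-vus scheduler
	// from the duration shortcut. The lower tier's stages are still present in
	// this version, which triggers the "duration and stages" deprecation warning.
	fmt.Println(merged.Duration, len(merged.Stages), merged.Execution == nil)
}
```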
+ if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || opts.Execution != nil { + //TODO: uncomment this after we start using the new schedulers + /* + o.Duration = types.NewNullDuration(0, false) + o.Iterations = null.NewInt(0, false) + o.Stages = nil + */ + o.Execution = nil + } + if opts.Duration.Valid { o.Duration = opts.Duration } @@ -324,6 +351,13 @@ func (o Options) Apply(opts Options) Options { } } } + // o.Execution can also be populated by the duration/iterations/stages config shortcuts, but + // that happens after the configuration from the different sources is consolidated. It can't + // happen here, because something like `K6_ITERATIONS=10 k6 run --vus 5 script.js` wont't + // work correctly at this level. + if opts.Execution != nil { + o.Execution = opts.Execution + } if opts.SetupTimeout.Valid { o.SetupTimeout = opts.SetupTimeout } @@ -412,10 +446,17 @@ func (o Options) Apply(opts Options) Options { return o } -// ForEachValid enumerates all struct fields and calls the supplied function with each +// Validate checks if all of the specified options make sense +func (o Options) Validate() []error { + //TODO: validate all of the other options... that we should have already been validating... + //TODO: maybe integrate an external validation lib: https://github.com/avelino/awesome-go#validation + return o.Execution.Validate() +} + +// ForEachSpecified enumerates all struct fields and calls the supplied function with each // element that is valid. It panics for any unfamiliar or unexpected fields, so make sure // new fields in Options are accounted for. -func (o Options) ForEachValid(structTag string, callback func(key string, value interface{})) { +func (o Options) ForEachSpecified(structTag string, callback func(key string, value interface{})) { structType := reflect.TypeOf(o) structVal := reflect.ValueOf(o) for i := 0; i < structType.NumField(); i++ { diff --git a/lib/options_test.go b/lib/options_test.go index c3bcc8ad69b..63cf4ae891e 100644 --- a/lib/options_test.go +++ b/lib/options_test.go @@ -30,6 +30,7 @@ import ( "time" "github.com/kelseyhightower/envconfig" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" @@ -87,6 +88,17 @@ func TestOptions(t *testing.T) { assert.Equal(t, oneStage, opts.Apply(Options{Stages: oneStage}).Stages) assert.Equal(t, oneStage, Options{}.Apply(opts).Apply(Options{Stages: oneStage}).Apply(Options{Stages: oneStage}).Stages) }) + t.Run("Execution", func(t *testing.T) { + sched := scheduler.NewConstantLoopingVUsConfig("test") + sched.VUs = null.IntFrom(123) + sched.Duration = types.NullDurationFrom(3 * time.Minute) + opts := Options{}.Apply(Options{Execution: scheduler.ConfigMap{"test": sched}}) + cs, ok := opts.Execution["test"].(scheduler.ConstantLoopingVUsConfig) + assert.True(t, ok) + assert.Equal(t, int64(123), cs.VUs.Int64) + assert.Equal(t, "3m0s", cs.Duration.String()) + }) + //TODO: test that any execution option overwrites any other lower-level options t.Run("RPS", func(t *testing.T) { opts := Options{}.Apply(Options{RPS: null.IntFrom(12345)}) assert.True(t, opts.RPS.Valid) diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go new file mode 100644 index 00000000000..73a4c22a4b4 --- /dev/null +++ b/lib/scheduler/base_config.go @@ -0,0 +1,103 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or 
modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const minPercentage = 0.01 + +// The maximum time k6 will wait after an iteration is supposed to be done +const maxIterationTimeout = 300 * time.Second + +// BaseConfig contains the common config fields for all schedulers +type BaseConfig struct { + Name string `json:"-"` // set via the JS object key + Type string `json:"type"` + StartTime types.NullDuration `json:"startTime"` + Interruptible null.Bool `json:"interruptible"` + IterationTimeout types.NullDuration `json:"iterationTimeout"` + Env map[string]string `json:"env"` + Exec null.String `json:"exec"` // function name, externally validated + Percentage float64 `json:"-"` // 100, unless Split() was called + + //TODO: future extensions like tags, distribution, others? +} + +// NewBaseConfig returns a default base config with the default values +func NewBaseConfig(name, configType string, interruptible bool) BaseConfig { + return BaseConfig{ + Name: name, + Type: configType, + Interruptible: null.NewBool(interruptible, false), + IterationTimeout: types.NewNullDuration(30*time.Second, false), + Percentage: 100, + } +} + +// Validate checks some basic things like present name, type, and a positive start time +func (bc BaseConfig) Validate() (errors []error) { + // Some just-in-case checks, since those things are likely checked in other places or + // even assigned by us: + if bc.Name == "" { + errors = append(errors, fmt.Errorf("scheduler name shouldn't be empty")) + } + if bc.Type == "" { + errors = append(errors, fmt.Errorf("missing or empty type field")) + } + if bc.Percentage < minPercentage || bc.Percentage > 100 { + errors = append(errors, fmt.Errorf( + "percentage should be between %f and 100, but is %f", minPercentage, bc.Percentage, + )) + } + if bc.Exec.Valid && bc.Exec.String == "" { + errors = append(errors, fmt.Errorf("exec value cannot be empty")) + } + // The actually reasonable checks: + if bc.StartTime.Duration < 0 { + errors = append(errors, fmt.Errorf("scheduler start time can't be negative")) + } + iterTimeout := time.Duration(bc.IterationTimeout.Duration) + if iterTimeout < 0 || iterTimeout > maxIterationTimeout { + errors = append(errors, fmt.Errorf( + "the iteration timeout should be between 0 and %s, but is %s", maxIterationTimeout, iterTimeout, + )) + } + return errors +} + +// GetBaseConfig just returns itself +func (bc BaseConfig) GetBaseConfig() BaseConfig { + return bc +} + +// CopyWithPercentage is a helper function that just sets the percentage to +// the specified amount. 
+func (bc BaseConfig) CopyWithPercentage(percentage float64) *BaseConfig { + c := bc + c.Percentage = percentage + return &c +} diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go new file mode 100644 index 00000000000..072f26f81e7 --- /dev/null +++ b/lib/scheduler/configmap.go @@ -0,0 +1,128 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "encoding/json" + "fmt" + "sync" +) + +// ConfigMap can contain mixed scheduler config types +type ConfigMap map[string]Config + +// ConfigConstructor is a simple function that returns a concrete Config instance +// with the specified name and all default values correctly initialized +type ConfigConstructor func(name string, rawJSON []byte) (Config, error) + +//nolint:gochecknoglobals +var ( + configTypesMutex sync.RWMutex + configConstructors = make(map[string]ConfigConstructor) +) + +// RegisterConfigType adds the supplied ConfigConstructor as the constructor for its +// type in the configConstructors map, in a thread-safe manner +func RegisterConfigType(configType string, constructor ConfigConstructor) { + configTypesMutex.Lock() + defer configTypesMutex.Unlock() + + if constructor == nil { + panic("scheduler configs: constructor is nil") + } + if _, configTypeExists := configConstructors[configType]; configTypeExists { + panic("scheduler configs: RegisterConfigType called twice for " + configType) + } + + configConstructors[configType] = constructor +} + +// GetParsedConfig returns a struct instance corresponding to the supplied +// config type. It will be fully initialized - with both the default values of +// the type, as well as with whatever the user had specified in the JSON +func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, err error) { + configTypesMutex.Lock() + defer configTypesMutex.Unlock() + + constructor, exists := configConstructors[configType] + if !exists { + return nil, fmt.Errorf("unknown execution scheduler type '%s'", configType) + } + return constructor(name, rawJSON) +} + +// UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, +// creating the correct type of configs based on the `type` property. +func (scs *ConfigMap) UnmarshalJSON(data []byte) error { + if len(data) == 0 { + return nil + } + + if len(data) == 4 && string(data) == "null" { + return nil + } + + //TODO: use a more sophisticated combination of dec.Token() and dec.More(), + // which would allow us to support both arrays and maps for this config? 
+ var protoConfigs map[string]protoConfig + if err := strictJSONUnmarshal(data, &protoConfigs); err != nil { + return err + } + + result := make(ConfigMap, len(protoConfigs)) + for k, v := range protoConfigs { + if v.Type == "" { + return fmt.Errorf("execution config '%s' doesn't have a type value", k) + } + config, err := GetParsedConfig(k, v.Type, v.rawJSON) + if err != nil { + return err + } + result[k] = config + } + + *scs = result + + return nil +} + +// Validate checks if all of the specified scheduler options make sense +func (scs ConfigMap) Validate() (errors []error) { + for name, scheduler := range scs { + if schedErr := scheduler.Validate(); len(schedErr) != 0 { + errors = append(errors, + fmt.Errorf("scheduler %s has errors: %s", name, concatErrors(schedErr, ", "))) + } + } + return errors +} + +type protoConfig struct { + BaseConfig + rawJSON json.RawMessage +} + +// UnmarshalJSON just unmarshals the base config (to get the type), but it also + stores the unprocessed JSON so we can parse the full config in the next step +func (pc *protoConfig) UnmarshalJSON(b []byte) error { + *pc = protoConfig{BaseConfig{}, b} + return json.Unmarshal(b, &pc.BaseConfig) +} diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go new file mode 100644 index 00000000000..ed53297e924 --- /dev/null +++ b/lib/scheduler/constant_arrival_rate.go @@ -0,0 +1,115 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see .
+ * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const constantArrivalRateType = "constant-arrival-rate" + +func init() { + RegisterConfigType(constantArrivalRateType, func(name string, rawJSON []byte) (Config, error) { + config := NewConstantArrivalRateConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// ConstantArrivalRateConfig stores config for the constant arrival-rate scheduler +type ConstantArrivalRateConfig struct { + BaseConfig + Rate null.Int `json:"rate"` + TimeUnit types.NullDuration `json:"timeUnit"` + Duration types.NullDuration `json:"duration"` + + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the scheduler will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewConstantArrivalRateConfig returns a ConstantArrivalRateConfig with default values +func NewConstantArrivalRateConfig(name string) ConstantArrivalRateConfig { + return ConstantArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, constantArrivalRateType, false), + TimeUnit: types.NewNullDuration(1*time.Second, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &ConstantArrivalRateConfig{} + +// Validate makes sure all options are configured and valid +func (carc ConstantArrivalRateConfig) Validate() []error { + errors := carc.BaseConfig.Validate() + if !carc.Rate.Valid { + errors = append(errors, fmt.Errorf("the iteration rate isn't specified")) + } else if carc.Rate.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the iteration rate should be more than 0")) + } + + if time.Duration(carc.TimeUnit.Duration) <= 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + if !carc.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(carc.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, carc.Duration, + )) + } + + if !carc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if carc.PreAllocatedVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !carc.MaxVUs.Valid { + errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) + } else if carc.MaxVUs.Int64 < carc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (carc ConstantArrivalRateConfig) GetMaxVUs() int64 { + return carc.MaxVUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (carc ConstantArrivalRateConfig) GetMaxDuration() time.Duration { + maxDuration := carc.Duration.Duration + if !carc.Interruptible.Bool { + maxDuration += carc.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go new file mode 100644 index 00000000000..a6293cb1b17 --- /dev/null +++ 
b/lib/scheduler/constant_looping_vus.go @@ -0,0 +1,113 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const constantLoopingVUsType = "constant-looping-vus" + +func init() { + RegisterConfigType(constantLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { + config := NewConstantLoopingVUsConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// The minimum duration we'll allow users to schedule. This doesn't affect the stages +// configuration, where 0-duration virtual stages are allowed for instantaneous VU jumps +const minDuration = 1 * time.Second + +// ConstantLoopingVUsConfig stores VUs and duration +type ConstantLoopingVUsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Duration types.NullDuration `json:"duration"` +} + +// NewConstantLoopingVUsConfig returns a ConstantLoopingVUsConfig with default values +func NewConstantLoopingVUsConfig(name string) ConstantLoopingVUsConfig { + return ConstantLoopingVUsConfig{ + BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false), + VUs: null.NewInt(1, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &ConstantLoopingVUsConfig{} + +// Validate makes sure all options are configured and valid +func (lcv ConstantLoopingVUsConfig) Validate() []error { + errors := lcv.BaseConfig.Validate() + if lcv.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if !lcv.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(lcv.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, lcv.Duration, + )) + } + + return errors +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (lcv ConstantLoopingVUsConfig) GetMaxVUs() int64 { + return lcv.VUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (lcv ConstantLoopingVUsConfig) GetMaxDuration() time.Duration { + maxDuration := lcv.Duration.Duration + if !lcv.Interruptible.Bool { + maxDuration += lcv.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} + +// Split divides the VUS as best it can, but keeps the same duration +func (lcv ConstantLoopingVUsConfig) Split(percentages []float64) ([]Config, error) { + if err := checkPercentagesSum(percentages); err != nil { + return nil, err + } + configs := make([]Config, len(percentages)) + for i, p := range percentages { + //TODO: figure out a better approach for the proportional 
distribution + // of the VUs (which are indivisible items)... + // Some sort of "pick closest match to percentage and adjust remaining"? + configs[i] = &ConstantLoopingVUsConfig{ + BaseConfig: *lcv.BaseConfig.CopyWithPercentage(p), + VUs: null.IntFrom(int64(float64(lcv.VUs.Int64) * p / 100)), + Duration: lcv.Duration, + } + } + return configs, nil +} diff --git a/lib/scheduler/helpers.go b/lib/scheduler/helpers.go new file mode 100644 index 00000000000..31bf37bf22a --- /dev/null +++ b/lib/scheduler/helpers.go @@ -0,0 +1,88 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "bytes" + "encoding/json" + "fmt" + "math" + "strings" +) + +// A helper function to verify percentage distributions +func checkPercentagesSum(percentages []float64) error { + var sum float64 + for _, v := range percentages { + sum += v + } + if math.Abs(100-sum) >= minPercentage { + return fmt.Errorf("split percentage sum is %.2f while it should be 100", sum) + } + return nil +} + +// A helper function for joining error messages into a single string +func concatErrors(errors []error, separator string) string { + errStrings := make([]string, len(errors)) + for i, e := range errors { + errStrings[i] = e.Error() + } + return strings.Join(errStrings, separator) +} + +// Decode a JSON in a strict manner, emitting an error if there are unknown fields +func strictJSONUnmarshal(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DisallowUnknownFields() + dec.UseNumber() + + if err := dec.Decode(&v); err != nil { + return err + } + if dec.More() { + return fmt.Errorf("unexpected data after the JSON object") + } + return nil +} + +// A helper function to avoid code duplication +func validateStages(stages []Stage) []error { + var errors []error + if len(stages) == 0 { + errors = append(errors, fmt.Errorf("at least one stage has to be specified")) + } else { + for i, s := range stages { + stageNum := i + 1 + if !s.Duration.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) + } else if s.Duration.Duration < 0 { + errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) + } + if !s.Target.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) + } else if s.Target.Int64 < 0 { + errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) + } + } + } + return errors +} diff --git a/lib/scheduler/helpers_test.go b/lib/scheduler/helpers_test.go new file mode 100644 index 00000000000..8165f922fd6 --- /dev/null +++ b/lib/scheduler/helpers_test.go @@ -0,0 +1,79 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under
the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCheckPercentagesSum(t *testing.T) { + t.Parallel() + assert.NoError(t, checkPercentagesSum([]float64{100})) + assert.NoError(t, checkPercentagesSum([]float64{50, 50})) + assert.NoError(t, checkPercentagesSum([]float64{100.0 / 3, 100.0 / 3, 100.0 / 3})) + assert.NoError(t, checkPercentagesSum([]float64{33.33, 33.33, 33.34})) + + assert.Error(t, checkPercentagesSum([]float64{})) + assert.Error(t, checkPercentagesSum([]float64{100 / 3, 100 / 3, 100 / 3})) + assert.Error(t, checkPercentagesSum([]float64{33.33, 33.33, 33.33})) + assert.Error(t, checkPercentagesSum([]float64{40, 40, 40})) +} + +func TestStrictJSONUnmarshal(t *testing.T) { + t.Parallel() + type someElement struct { + Data int `json:"data"` + Props map[string]string `json:"props"` + } + + testCases := []struct { + data string + expectedError bool + destination interface{} + expectedResult interface{} + }{ + {``, true, &someElement{}, nil}, + {`123`, true, &someElement{}, nil}, + {`"blah"`, true, &someElement{}, nil}, + {`null`, false, &someElement{}, &someElement{}}, + { + `{"data": 123, "props": {"test": "mest"}}`, false, &someElement{}, + &someElement{123, map[string]string{"test": "mest"}}, + }, + {`{"data": 123, "props": {"test": "mest"}}asdg`, true, &someElement{}, nil}, + } + for i, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { + err := strictJSONUnmarshal([]byte(tc.data), &tc.destination) + if tc.expectedError { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tc.expectedResult, tc.destination) + }) + } +} diff --git a/lib/scheduler/interfaces.go b/lib/scheduler/interfaces.go new file mode 100644 index 00000000000..764df42ae85 --- /dev/null +++ b/lib/scheduler/interfaces.go @@ -0,0 +1,33 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package scheduler + +import "time" + +// Config is an interface that should be implemented by all scheduler config types +type Config interface { + GetBaseConfig() BaseConfig + Validate() []error + GetMaxVUs() int64 + GetMaxDuration() time.Duration // includes max timeouts, to allow us to share VUs between schedulers in the future + //TODO: Split(percentages []float64) ([]Config, error) + //TODO: String() method that could be used for priting descriptions of the currently running schedulers for the UI? +} diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go new file mode 100644 index 00000000000..25501411ea4 --- /dev/null +++ b/lib/scheduler/per_vu_iterations.go @@ -0,0 +1,95 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const perVUIterationsType = "per-vu-iterations" + +func init() { + RegisterConfigType(perVUIterationsType, func(name string, rawJSON []byte) (Config, error) { + config := NewPerVUIterationsConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// PerVUIteationsConfig stores the number of VUs iterations, as well as maxDuration settings +type PerVUIteationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewPerVUIterationsConfig returns a PerVUIteationsConfig with default values +func NewPerVUIterationsConfig(name string) PerVUIteationsConfig { + return PerVUIteationsConfig{ + BaseConfig: NewBaseConfig(name, perVUIterationsType, false), + VUs: null.NewInt(1, false), + Iterations: null.NewInt(1, false), + MaxDuration: types.NewNullDuration(1*time.Hour, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &PerVUIteationsConfig{} + +// Validate makes sure all options are configured and valid +func (pvic PerVUIteationsConfig) Validate() []error { + errors := pvic.BaseConfig.Validate() + if pvic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if pvic.Iterations.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of iterations should be more than 0")) + } + + if time.Duration(pvic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", minDuration, pvic.MaxDuration, + )) + } + + return errors +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (pvic PerVUIteationsConfig) GetMaxVUs() int64 { + return pvic.VUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are 
uninterruptible +func (pvic PerVUIteationsConfig) GetMaxDuration() time.Duration { + maxDuration := pvic.MaxDuration.Duration + if !pvic.Interruptible.Bool { + maxDuration += pvic.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go new file mode 100644 index 00000000000..11c07453f64 --- /dev/null +++ b/lib/scheduler/schedulers_test.go @@ -0,0 +1,239 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/loadimpact/k6/lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + null "gopkg.in/guregu/null.v3" +) + +type configMapTestCase struct { + rawJSON string + expectParseError bool + expectValidationError bool + customValidator func(t *testing.T, cm ConfigMap) +} + +//nolint:lll,gochecknoglobals +var configMapTestCases = []configMapTestCase{ + {"", true, false, nil}, + {"1234", true, false, nil}, + {"asdf", true, false, nil}, + {"'adsf'", true, false, nil}, + {"[]", true, false, nil}, + {"{}", false, false, func(t *testing.T, cm ConfigMap) { + assert.Equal(t, cm, ConfigMap{}) + }}, + {"{}asdf", true, false, nil}, + {"null", false, false, func(t *testing.T, cm ConfigMap) { + assert.Nil(t, cm) + }}, + {`{"someKey": {}}`, true, false, nil}, + {`{"someKey": {"type": "constant-blah-blah", "vus": 10, "duration": "60s"}}`, true, false, nil}, + {`{"someKey": {"type": "constant-looping-vus", "uknownField": "should_error"}}`, true, false, nil}, + {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "env": 123}}`, true, false, nil}, + + // Validation errors for constant-looping-vus and the base config + {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, + "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc"}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewConstantLoopingVUsConfig("someKey") + sched.VUs = null.IntFrom(10) + sched.Duration = types.NullDurationFrom(1 * time.Minute) + sched.Interruptible = null.BoolFrom(false) + sched.IterationTimeout = types.NullDurationFrom(10 * time.Second) + sched.StartTime = types.NullDurationFrom(70 * time.Second) + sched.Exec = null.StringFrom("someFunc") + sched.Env = map[string]string{"test": "mest"} + require.Equal(t, cm, ConfigMap{"someKey": sched}) + require.Equal(t, sched.BaseConfig, cm["someKey"].GetBaseConfig()) + assert.Equal(t, 70*time.Second, cm["someKey"].GetMaxDuration()) + assert.Equal(t, int64(10), cm["someKey"].GetMaxVUs()) + assert.Empty(t, cm["someKey"].Validate()) + }}, + {`{"aname": {"type": "constant-looping-vus", "duration": "60s"}}`, false, false, nil}, + {`{"": {"type": "constant-looping-vus", "vus": 10, 
"duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 0.5}}`, true, false, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 0, "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": -1, "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "0s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "startTime": "-10s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "exec": ""}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "iterationTimeout": "-2s"}}`, false, true, nil}, + + // variable-looping-vus + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 20, "iterationTimeout": "15s", + "stages": [{"duration": "60s", "target": 30}, {"duration": "120s", "target": 10}]}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewVariableLoopingVUsConfig("varloops") + sched.IterationTimeout = types.NullDurationFrom(15 * time.Second) + sched.StartVUs = null.IntFrom(20) + sched.Stages = []Stage{ + {Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * time.Second)}, + {Target: null.IntFrom(10), Duration: types.NullDurationFrom(120 * time.Second)}, + } + require.Equal(t, cm, ConfigMap{"varloops": sched}) + assert.Equal(t, int64(30), cm["varloops"].GetMaxVUs()) + assert.Equal(t, 195*time.Second, cm["varloops"].GetMaxDuration()) + assert.Empty(t, cm["varloops"].Validate()) + }}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 0, "stages": [{"duration": "60s", "target": 0}]}}`, false, false, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": -1, "stages": [{"duration": "60s", "target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "-60s", "target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "60s", "target": -30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": [{"duration": "60s"}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": [{"target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": []}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus"}}`, false, true, nil}, + + // shared-iterations + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewSharedIterationsConfig("ishared") + sched.Iterations = null.IntFrom(20) + sched.VUs = null.IntFrom(10) + require.Equal(t, cm, ConfigMap{"ishared": sched}) + assert.Equal(t, int64(10), cm["ishared"].GetMaxVUs()) + assert.Equal(t, 3630*time.Second, cm["ishared"].GetMaxDuration()) + assert.Empty(t, cm["ishared"].Validate()) + }}, + {`{"ishared": {"type": "shared-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values + {`{"ishared": {"type": "shared-iterations", "iterations": 20}}`, false, false, nil}, + {`{"ishared": {"type": "shared-iterations", "vus": 10}}`, false, true, nil}, // error because VUs are more than iters + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, 
"maxDuration": "30m"}}`, false, false, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 30}}`, false, true, nil}, + + // per-vu-iterations + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewPerVUIterationsConfig("ipervu") + sched.Iterations = null.IntFrom(20) + sched.VUs = null.IntFrom(10) + require.Equal(t, cm, ConfigMap{"ipervu": sched}) + assert.Equal(t, int64(10), cm["ipervu"].GetMaxVUs()) + assert.Equal(t, 3630*time.Second, cm["ipervu"].GetMaxDuration()) + assert.Empty(t, cm["ipervu"].Validate()) + }}, + {`{"ipervu": {"type": "per-vu-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20}}`, false, false, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "vus": 10}}`, false, false, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, false, false, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, + + // constant-arrival-rate + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "timeUnit": "1m", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewConstantArrivalRateConfig("carrival") + sched.Rate = null.IntFrom(10) + sched.Duration = types.NullDurationFrom(10 * time.Minute) + sched.TimeUnit = types.NullDurationFrom(1 * time.Minute) + sched.PreAllocatedVUs = null.IntFrom(20) + sched.MaxVUs = null.IntFrom(30) + require.Equal(t, cm, ConfigMap{"carrival": sched}) + assert.Equal(t, int64(30), cm["carrival"].GetMaxVUs()) + assert.Equal(t, 630*time.Second, cm["carrival"].GetMaxDuration()) + assert.Empty(t, cm["carrival"].Validate()) + }}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, false, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30, "timeUnit": "-1s"}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "0m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": 
"constant-arrival-rate", "rate": 0, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 15}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "0s", "preAllocatedVUs": 20, "maxVUs": 25}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": -2, "maxVUs": 25}}`, false, true, nil}, + + // variable-arrival-rate + {`{"varrival": {"type": "variable-arrival-rate", "startRate": 10, "timeUnit": "30s", "preAllocatedVUs": 20, "maxVUs": 50, + "stages": [{"duration": "3m", "target": 30}, {"duration": "5m", "target": 10}]}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewVariableArrivalRateConfig("varrival") + sched.StartRate = null.IntFrom(10) + sched.Stages = []Stage{ + {Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)}, + {Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)}, + } + sched.TimeUnit = types.NullDurationFrom(30 * time.Second) + sched.PreAllocatedVUs = null.IntFrom(20) + sched.MaxVUs = null.IntFrom(50) + require.Equal(t, cm, ConfigMap{"varrival": sched}) + assert.Equal(t, int64(50), cm["varrival"].GetMaxVUs()) + assert.Equal(t, 510*time.Second, cm["varrival"].GetMaxDuration()) + assert.Empty(t, cm["varrival"].Validate()) + }}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, false, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": -20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "startRate": -1, "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": []}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}], "timeUnit": "-1s"}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 30, "maxVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, +} + +func TestConfigMapParsingAndValidation(t *testing.T) { + t.Parallel() + for i, tc := range configMapTestCases { + tc := tc + t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { + t.Logf(tc.rawJSON) + var result ConfigMap + err := json.Unmarshal([]byte(tc.rawJSON), &result) + if tc.expectParseError { + require.Error(t, err) + return + } + require.NoError(t, err) + + validationErrors := result.Validate() + if tc.expectValidationError { + assert.NotEmpty(t, validationErrors) + } else { + assert.Empty(t, validationErrors) + } + if tc.customValidator != nil { + tc.customValidator(t, result) + } + }) + } +} + +//TODO: check percentage split calculations diff --git a/lib/scheduler/shared_iterations.go 
b/lib/scheduler/shared_iterations.go new file mode 100644 index 00000000000..d6ef2e2bb92 --- /dev/null +++ b/lib/scheduler/shared_iterations.go @@ -0,0 +1,98 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const sharedIterationsType = "shared-iterations" + +func init() { + RegisterConfigType(sharedIterationsType, func(name string, rawJSON []byte) (Config, error) { + config := NewSharedIterationsConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// SharedIteationsConfig stores the number of VUs iterations, as well as maxDuration settings +type SharedIteationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewSharedIterationsConfig returns a SharedIteationsConfig with default values +func NewSharedIterationsConfig(name string) SharedIteationsConfig { + return SharedIteationsConfig{ + BaseConfig: NewBaseConfig(name, sharedIterationsType, false), + VUs: null.NewInt(1, false), + Iterations: null.NewInt(1, false), + MaxDuration: types.NewNullDuration(1*time.Hour, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &SharedIteationsConfig{} + +// Validate makes sure all options are configured and valid +func (sic SharedIteationsConfig) Validate() []error { + errors := sic.BaseConfig.Validate() + if sic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if sic.Iterations.Int64 < sic.VUs.Int64 { + errors = append(errors, fmt.Errorf( + "the number of iterations (%d) shouldn't be less than the number of VUs (%d)", + sic.Iterations.Int64, sic.VUs.Int64, + )) + } + + if time.Duration(sic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", minDuration, sic.MaxDuration, + )) + } + + return errors +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (sic SharedIteationsConfig) GetMaxVUs() int64 { + return sic.VUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (sic SharedIteationsConfig) GetMaxDuration() time.Duration { + maxDuration := sic.MaxDuration.Duration + if !sic.Interruptible.Bool { + maxDuration += sic.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go new file mode 100644 index 00000000000..76ba7a51dcb --- /dev/null +++ b/lib/scheduler/variable_arrival_rate.go @@ -0,0 +1,111 @@ +/* + * + * 
k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const variableArrivalRateType = "variable-arrival-rate" + +func init() { + RegisterConfigType(variableArrivalRateType, func(name string, rawJSON []byte) (Config, error) { + config := NewVariableArrivalRateConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// VariableArrivalRateConfig stores config for the variable arrival-rate scheduler +type VariableArrivalRateConfig struct { + BaseConfig + StartRate null.Int `json:"startRate"` + TimeUnit types.NullDuration `json:"timeUnit"` + Stages []Stage `json:"stages"` + + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the scheduler will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewVariableArrivalRateConfig returns a VariableArrivalRateConfig with default values +func NewVariableArrivalRateConfig(name string) VariableArrivalRateConfig { + return VariableArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, variableArrivalRateType, false), + TimeUnit: types.NewNullDuration(1*time.Second, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &VariableArrivalRateConfig{} + +// Validate makes sure all options are configured and valid +func (varc VariableArrivalRateConfig) Validate() []error { + errors := varc.BaseConfig.Validate() + + if varc.StartRate.Int64 < 0 { + errors = append(errors, fmt.Errorf("the startRate value shouldn't be negative")) + } + + if time.Duration(varc.TimeUnit.Duration) < 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + errors = append(errors, validateStages(varc.Stages)...) 
+ + if !varc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if varc.PreAllocatedVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !varc.MaxVUs.Valid { + errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) + } else if varc.MaxVUs.Int64 < varc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (varc VariableArrivalRateConfig) GetMaxVUs() int64 { + return varc.MaxVUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (varc VariableArrivalRateConfig) GetMaxDuration() time.Duration { + var maxDuration types.Duration + for _, s := range varc.Stages { + maxDuration += s.Duration.Duration + } + if !varc.Interruptible.Bool { + maxDuration += varc.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go new file mode 100644 index 00000000000..f1ab3de1769 --- /dev/null +++ b/lib/scheduler/variable_looping_vus.go @@ -0,0 +1,94 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const variableLoopingVUsType = "variable-looping-vus" + +func init() { + RegisterConfigType(variableLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { + config := NewVariableLoopingVUsConfig(name) + err := strictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// Stage contains +type Stage struct { + Duration types.NullDuration `json:"duration"` + Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? something else? 
+} + +// VariableLoopingVUsConfig stores the configuration for the stages scheduler +type VariableLoopingVUsConfig struct { + BaseConfig + StartVUs null.Int `json:"startVUs"` + Stages []Stage `json:"stages"` +} + +// NewVariableLoopingVUsConfig returns a VariableLoopingVUsConfig with its default values +func NewVariableLoopingVUsConfig(name string) VariableLoopingVUsConfig { + return VariableLoopingVUsConfig{BaseConfig: NewBaseConfig(name, variableLoopingVUsType, false)} +} + +// Make sure we implement the Config interface +var _ Config = &VariableLoopingVUsConfig{} + +// Validate makes sure all options are configured and valid +func (vlvc VariableLoopingVUsConfig) Validate() []error { + errors := vlvc.BaseConfig.Validate() + if vlvc.StartVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of start VUs shouldn't be negative")) + } + + return append(errors, validateStages(vlvc.Stages)...) +} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (vlvc VariableLoopingVUsConfig) GetMaxVUs() int64 { + maxVUs := vlvc.StartVUs.Int64 + for _, s := range vlvc.Stages { + if s.Target.Int64 > maxVUs { + maxVUs = s.Target.Int64 + } + } + return maxVUs +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (vlvc VariableLoopingVUsConfig) GetMaxDuration() time.Duration { + var maxDuration types.Duration + for _, s := range vlvc.Stages { + maxDuration += s.Duration.Duration + } + if !vlvc.Interruptible.Bool { + maxDuration += vlvc.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/testutils/logrus_hook.go b/lib/testutils/logrus_hook.go new file mode 100644 index 00000000000..4a355bf8ba0 --- /dev/null +++ b/lib/testutils/logrus_hook.go @@ -0,0 +1,39 @@ +package testutils + +import ( + "sync" + + log "github.com/sirupsen/logrus" +) + +// SimpleLogrusHook implements the logrus.Hook interface and could be used to check +// if log messages were outputted +type SimpleLogrusHook struct { + HookedLevels []log.Level + mutex sync.Mutex + messageCache []log.Entry +} + +// Levels just returns whatever was stored in the HookedLevels slice +func (smh *SimpleLogrusHook) Levels() []log.Level { + return smh.HookedLevels +} + +// Fire saves whatever message the logrus library passed in the cache +func (smh *SimpleLogrusHook) Fire(e *log.Entry) error { + smh.mutex.Lock() + defer smh.mutex.Unlock() + smh.messageCache = append(smh.messageCache, *e) + return nil +} + +// Drain returns the currently stored messages and deletes them from the cache +func (smh *SimpleLogrusHook) Drain() []log.Entry { + smh.mutex.Lock() + defer smh.mutex.Unlock() + res := smh.messageCache + smh.messageCache = []log.Entry{} + return res +} + +var _ log.Hook = &SimpleLogrusHook{}
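A quick illustration of how the new `SimpleLogrusHook` helper is meant to be used in tests. This is a hedged sketch and not part of the diff above; the logger setup and the log messages are invented for the example, while the hook API (`HookedLevels`, `Fire`, `Drain`) comes from the code shown.

```go
package main

import (
	"fmt"
	"io/ioutil"

	log "github.com/sirupsen/logrus"

	"github.com/loadimpact/k6/lib/testutils"
)

func main() {
	// Hook only the warning level; entries at other levels won't be cached.
	hook := &testutils.SimpleLogrusHook{HookedLevels: []log.Level{log.WarnLevel}}

	logger := log.New()
	logger.AddHook(hook)
	logger.SetOutput(ioutil.Discard) // silence normal output, the hook still fires

	logger.Warn("something a test would want to assert on")
	logger.Info("not a hooked level, so not cached")

	// Drain returns the cached entries and empties the internal cache.
	for _, entry := range hook.Drain() {
		fmt.Println(entry.Level, entry.Message)
	}
}
```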
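Stepping back to the scheduler package itself, here is a minimal sketch of how calling code could consume the new `ConfigMap`, based on the `UnmarshalJSON`, `Validate`, and `Config` interface methods added above. The import path follows the existing `github.com/loadimpact/k6/lib/...` layout, and the scheduler names and values in the JSON are invented for illustration; the `type` field of each entry selects the registered config constructor, and unknown fields or types are rejected by the strict unmarshalling.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/loadimpact/k6/lib/scheduler"
)

func main() {
	raw := []byte(`{
		"warm_up":   {"type": "constant-looping-vus", "vus": 10, "duration": "60s"},
		"main_load": {"type": "constant-arrival-rate", "rate": 30, "duration": "10m",
		              "preAllocatedVUs": 20, "maxVUs": 40}
	}`)

	// ConfigMap.UnmarshalJSON creates the concrete config type for each entry.
	var cm scheduler.ConfigMap
	if err := json.Unmarshal(raw, &cm); err != nil {
		log.Fatalf("parse error: %v", err)
	}
	if errs := cm.Validate(); len(errs) != 0 {
		log.Fatalf("validation errors: %v", errs)
	}
	for name, conf := range cm {
		fmt.Printf("%s: at most %d VUs for up to %s\n", name, conf.GetMaxVUs(), conf.GetMaxDuration())
	}
}
```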
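Finally, since `RegisterConfigType` and the `Config` interface are the extension point for new scheduler types, the following hypothetical example (not part of the diff) shows how another config type inside the same package could plug itself in. The `single-run` type name, the `SingleRunConfig` struct, and its semantics are made up purely for illustration; only the registration pattern mirrors the files above.

```go
package scheduler

import (
	"fmt"
	"time"

	null "gopkg.in/guregu/null.v3"
)

const singleRunType = "single-run" // hypothetical scheduler type name

func init() {
	// Hook the new type into the ConfigMap JSON unmarshalling.
	RegisterConfigType(singleRunType, func(name string, rawJSON []byte) (Config, error) {
		config := SingleRunConfig{
			BaseConfig: NewBaseConfig(name, singleRunType, false),
			VUs:        null.NewInt(1, false),
		}
		err := strictJSONUnmarshal(rawJSON, &config)
		return config, err
	})
}

// SingleRunConfig would run each VU for exactly one iteration (illustrative only).
type SingleRunConfig struct {
	BaseConfig
	VUs null.Int `json:"vus"`
}

// Make sure we implement the Config interface
var _ Config = SingleRunConfig{}

// Validate adds the type-specific checks on top of the BaseConfig ones.
func (src SingleRunConfig) Validate() []error {
	errors := src.BaseConfig.Validate()
	if src.VUs.Int64 <= 0 {
		errors = append(errors, fmt.Errorf("the number of VUs should be more than 0"))
	}
	return errors
}

// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs
func (src SingleRunConfig) GetMaxVUs() int64 { return src.VUs.Int64 }

// GetMaxDuration is bounded only by the iteration timeout, since there is no explicit duration.
func (src SingleRunConfig) GetMaxDuration() time.Duration {
	return time.Duration(src.IterationTimeout.Duration)
}
```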