From 029db7c54a4f0eaedc5477b9f5c6ae9a21f09cee Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 29 Jan 2019 12:59:11 +0200 Subject: [PATCH 01/36] Add the config framework for the different schedulers --- cmd/run.go | 15 ++++ converter/har/converter.go | 2 +- js/bundle.go | 2 +- lib/options.go | 20 +++++- lib/scheduler/base_config.go | 95 ++++++++++++++++++++++++++ lib/scheduler/configmap.go | 92 +++++++++++++++++++++++++ lib/scheduler/constant_arrival_rate.go | 73 ++++++++++++++++++++ lib/scheduler/constant_looping_vus.go | 70 +++++++++++++++++++ lib/scheduler/helpers.go | 28 ++++++++ lib/scheduler/interfaces.go | 16 +++++ lib/scheduler/per_vu_iterations.go | 54 +++++++++++++++ lib/scheduler/shared_iterations.go | 54 +++++++++++++++ lib/scheduler/variable_arrival_rate.go | 68 ++++++++++++++++++ lib/scheduler/variable_looping_vus.go | 59 ++++++++++++++++ 14 files changed, 644 insertions(+), 4 deletions(-) create mode 100644 lib/scheduler/base_config.go create mode 100644 lib/scheduler/configmap.go create mode 100644 lib/scheduler/constant_arrival_rate.go create mode 100644 lib/scheduler/constant_looping_vus.go create mode 100644 lib/scheduler/helpers.go create mode 100644 lib/scheduler/interfaces.go create mode 100644 lib/scheduler/per_vu_iterations.go create mode 100644 lib/scheduler/shared_iterations.go create mode 100644 lib/scheduler/variable_arrival_rate.go create mode 100644 lib/scheduler/variable_looping_vus.go diff --git a/cmd/run.go b/cmd/run.go index f5b3b432302..f51497d9a2c 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -37,6 +37,8 @@ import ( "syscall" "time" + "github.com/davecgh/go-spew/spew" + "github.com/loadimpact/k6/api" "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" @@ -61,6 +63,7 @@ const ( teardownTimeoutErrorCode = 101 genericTimeoutErrorCode = 102 genericEngineErrorCode = 103 + invalidOptionsErrorCode = 104 ) var ( @@ -165,6 +168,18 @@ a commandline interface for interacting with it.`, conf.Duration = 
types.NullDuration{} } + //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method + spew.Dump(conf.Options.Execution) + if errList := conf.Validate(); len(errList) != 0 { + errMsg := []string{ + fmt.Sprintf("There were %d errors with the script options:", len(errList)), + } + for _, err := range errList { + errMsg = append(errMsg, fmt.Sprintf("\t- %s", err.Error())) + } + return ExitCode{errors.New(strings.Join(errMsg, "\n")), invalidOptionsErrorCode} + } + // If summary trend stats are defined, update the UI to reflect them if len(conf.SummaryTrendStats) > 0 { ui.UpdateTrendColumns(conf.SummaryTrendStats) diff --git a/converter/har/converter.go b/converter/har/converter.go index e600974f509..42c88a3467c 100644 --- a/converter/har/converter.go +++ b/converter/har/converter.go @@ -91,7 +91,7 @@ func Convert(h HAR, options lib.Options, minSleep, maxSleep uint, enableChecks b } fprint(w, "\nexport let options = {\n") - options.ForEachValid("json", func(key string, val interface{}) { + options.ForEachSpecified("json", func(key string, val interface{}) { if valJSON, err := json.MarshalIndent(val, " ", " "); err != nil { convertErr = err } else { diff --git a/js/bundle.go b/js/bundle.go index 75185c636dc..3b5de91f38e 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -224,7 +224,7 @@ func (b *Bundle) Instantiate() (bi *BundleInstance, instErr error) { } else { jsOptionsObj = jsOptions.ToObject(rt) } - b.Options.ForEachValid("json", func(key string, val interface{}) { + b.Options.ForEachSpecified("json", func(key string, val interface{}) { if err := jsOptionsObj.Set(key, val); err != nil { instErr = err } diff --git a/lib/options.go b/lib/options.go index 8285e4764d8..20e3f29961d 100644 --- a/lib/options.go +++ b/lib/options.go @@ -27,6 +27,7 @@ import ( "net" "reflect" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/pkg/errors" @@ -194,6 +195,8 @@ type Options 
struct { Iterations null.Int `json:"iterations" envconfig:"iterations"` Stages []Stage `json:"stages" envconfig:"stages"` + Execution scheduler.ConfigMap `json:"execution" envconfig:"execution"` + // Timeouts for the setup() and teardown() functions SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"setup_timeout"` TeardownTimeout types.NullDuration `json:"teardownTimeout" envconfig:"teardown_timeout"` @@ -307,6 +310,12 @@ func (o Options) Apply(opts Options) Options { } } } + + //TODO: handle o.Execution overwriting by plain vus/iterations/duration/stages options + if len(opts.Execution) > 0 { + o.Execution = opts.Execution + } + if opts.SetupTimeout.Valid { o.SetupTimeout = opts.SetupTimeout } @@ -395,10 +404,17 @@ func (o Options) Apply(opts Options) Options { return o } -// ForEachValid enumerates all struct fields and calls the supplied function with each +// Validate checks if all of the specified options make sense +func (o Options) Validate() []error { + //TODO: validate all of the other options... that we should have already been validating... + //TODO: maybe integrate an external validation lib: https://github.com/avelino/awesome-go#validation + return o.Execution.Validate() +} + +// ForEachSpecified enumerates all struct fields and calls the supplied function with each // element that is valid. It panics for any unfamiliar or unexpected fields, so make sure // new fields in Options are accounted for. 
-func (o Options) ForEachValid(structTag string, callback func(key string, value interface{})) { +func (o Options) ForEachSpecified(structTag string, callback func(key string, value interface{})) { structType := reflect.TypeOf(o) structVal := reflect.ValueOf(o) for i := 0; i < structType.NumField(); i++ { diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go new file mode 100644 index 00000000000..9aa5f9b4c26 --- /dev/null +++ b/lib/scheduler/base_config.go @@ -0,0 +1,95 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const minPercentage = 0.01 + +// The maximum time k6 will wait after an iteration is supposed to be done +const maxIterationTimeout = 300 * time.Second + +// BaseConfig contains the common config fields for all schedulers +type BaseConfig struct { + Name string `json:"-"` // set via the JS object key + Type string `json:"type"` + StartTime types.NullDuration `json:"startTime"` + Interruptible null.Bool `json:"interruptible"` + IterationTimeout types.NullDuration `json:"iterationTimeout"` + Env map[string]string `json:"env"` + Exec string `json:"exec"` // function name, externally validated + Percentage float64 `json:"-"` // 100, unless Split() was called + + // Future extensions: tags, distribution, others? +} + +// Make sure we implement the Config interface, even with the BaseConfig! 
+var _ Config = &BaseConfig{} + +// NewBaseConfig returns a default base config with the default values +func NewBaseConfig(name, configType string, interruptible bool) BaseConfig { + return BaseConfig{ + Name: name, + Type: configType, + Interruptible: null.NewBool(interruptible, false), + IterationTimeout: types.NewNullDuration(30*time.Second, false), + Percentage: 100, + } +} + +// Validate checks some basic things like present name, type, and a positive start time +func (bc BaseConfig) Validate() (errors []error) { + // Some just-in-case checks, since those things are likely checked in other places or + // even assigned by us: + if bc.Name == "" { + errors = append(errors, fmt.Errorf("scheduler name shouldn't be empty")) + } + if bc.Type == "" { + errors = append(errors, fmt.Errorf("missing or empty type field")) + } + if bc.Percentage < minPercentage || bc.Percentage > 100 { + errors = append(errors, fmt.Errorf( + "percentage should be between %f and 100, but is %f", minPercentage, bc.Percentage, + )) + } + // The actually reasonable checks: + if bc.StartTime.Valid && bc.StartTime.Duration < 0 { + errors = append(errors, fmt.Errorf("scheduler start time should be positive")) + } + iterTimeout := time.Duration(bc.IterationTimeout.Duration) + if iterTimeout < 0 || iterTimeout > maxIterationTimeout { + errors = append(errors, fmt.Errorf( + "the iteration timeout should be between 0 and %s, but is %s", maxIterationTimeout, iterTimeout, + )) + } + return errors +} + +// GetBaseConfig just returns itself +func (bc BaseConfig) GetBaseConfig() BaseConfig { + return bc +} + +// CopyWithPercentage is a helper function that just sets the percentage to +// the specified amount. 
+func (bc BaseConfig) CopyWithPercentage(percentage float64) *BaseConfig { + c := bc + c.Percentage = percentage + return &c +} + +// Split splits the BaseConfig with the accurate percentages +func (bc BaseConfig) Split(percentages []float64) ([]Config, error) { + if err := checkPercentagesSum(percentages); err != nil { + return nil, err + } + configs := make([]Config, len(percentages)) + for i, p := range percentages { + configs[i] = bc.CopyWithPercentage(p) + } + return configs, nil +} diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go new file mode 100644 index 00000000000..b97b2f3f569 --- /dev/null +++ b/lib/scheduler/configmap.go @@ -0,0 +1,92 @@ +package scheduler + +import ( + "encoding/json" + "fmt" +) + +// GetParsedConfig returns a struct instance corresponding to the supplied +// config type. It will be fully initialized - with both the default values of +// the type, as well as with whatever the user had specified in the JSON +func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, err error) { + switch configType { + case constantLoopingVUsType: + config := NewConstantLoopingVUsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case variableLoopingVUsType: + config := NewVariableLoopingVUsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case sharedIterationsType: + config := NewSharedIterationsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case perVUIterationsType: + config := NewPerVUIterationsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case constantArrivalRateType: + config := NewConstantArrivalRateConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case variableArrivalRateType: + config := NewVariableArrivalRateConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + default: + return nil, fmt.Errorf("unknown execution scheduler type '%s'", configType) + } + return +} 
+ +// ConfigMap can contain mixed scheduler config types +type ConfigMap map[string]Config + +// UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, +// creating the correct type of configs based on the `type` property. +func (scs *ConfigMap) UnmarshalJSON(b []byte) error { + var protoConfigs map[string]protoConfig + if err := json.Unmarshal(b, &protoConfigs); err != nil { + return err + } + + result := make(ConfigMap, len(protoConfigs)) + for k, v := range protoConfigs { + if v.Type == "" { + return fmt.Errorf("execution config '%s' doesn't have a type value", k) + } + config, err := GetParsedConfig(k, v.Type, v.rawJSON) + if err != nil { + return err + } + result[k] = config + } + + *scs = result + + return nil +} + +// Validate checks if all of the specified scheduler options make sense +func (scs ConfigMap) Validate() (errors []error) { + for name, scheduler := range scs { + if schedErr := scheduler.Validate(); len(schedErr) != 0 { + errors = append(errors, + fmt.Errorf("scheduler %s has errors: %s", name, concatErrors(schedErr, ", "))) + } + } + return errors +} + +type protoConfig struct { + BaseConfig + rawJSON json.RawMessage +} + +// UnmarshalJSON just reads unmarshals the base config (to get the type), but it also +// stores the unprocessed JSON so we can parse the full config in the next step +func (pc *protoConfig) UnmarshalJSON(b []byte) error { + *pc = protoConfig{BaseConfig{}, b} + return json.Unmarshal(b, &pc.BaseConfig) +} diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go new file mode 100644 index 00000000000..c2dd0885204 --- /dev/null +++ b/lib/scheduler/constant_arrival_rate.go @@ -0,0 +1,73 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const constantArrivalRateType = "constant-arrival-rate" + +// ConstantArrivalRateConfig stores config for the constant arrival-rate scheduler +type 
ConstantArrivalRateConfig struct { + BaseConfig + Rate null.Int `json:"rate"` + TimeUnit types.NullDuration `json:"timeUnit"` //TODO: rename to something else? + Duration types.NullDuration `json:"duration"` + + // Initialize `PreAllocatedVUs` numeber of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the scheduler will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewConstantArrivalRateConfig returns a ConstantArrivalRateConfig with default values +func NewConstantArrivalRateConfig(name string) ConstantArrivalRateConfig { + return ConstantArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, constantArrivalRateType, false), + TimeUnit: types.NewNullDuration(1*time.Second, false), + //TODO: set some default values for PreAllocatedVUs and MaxVUs? + } +} + +// Make sure we implement the Config interface +var _ Config = &ConstantArrivalRateConfig{} + +// Validate makes sure all options are configured and valid +func (carc ConstantArrivalRateConfig) Validate() []error { + errors := carc.BaseConfig.Validate() + if !carc.Rate.Valid { + errors = append(errors, fmt.Errorf("the iteration rate isn't specified")) + } else if carc.Rate.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the iteration rate should be positive")) + } + + if time.Duration(carc.TimeUnit.Duration) < 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + if !carc.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(carc.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, carc.Duration, + )) + } + + if !carc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if carc.PreAllocatedVUs.Int64 < 0 { 
+ errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !carc.MaxVUs.Valid { + errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) + } else if carc.MaxVUs.Int64 < carc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go new file mode 100644 index 00000000000..3de16d724e5 --- /dev/null +++ b/lib/scheduler/constant_looping_vus.go @@ -0,0 +1,70 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const constantLoopingVUsType = "constant-looping-vus" + +// The minimum duration we'll allow users to schedule. This doesn't affect the stages +// configuration, where 0-duration virtual stages are allowed for instantaneous VU jumps +const minDuration = 1 * time.Second + +// ConstantLoopingVUsConfig stores VUs and duration +type ConstantLoopingVUsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Duration types.NullDuration `json:"duration"` +} + +// NewConstantLoopingVUsConfig returns a ConstantLoopingVUsConfig with default values +func NewConstantLoopingVUsConfig(name string) ConstantLoopingVUsConfig { + //TODO: decide if we want interruptible or uninterruptible iterations here? 
+ return ConstantLoopingVUsConfig{BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false)} +} + +// Make sure we implement the Config interface +var _ Config = &ConstantLoopingVUsConfig{} + +// Validate makes sure all options are configured and valid +func (lcv ConstantLoopingVUsConfig) Validate() []error { + errors := lcv.BaseConfig.Validate() + if !lcv.VUs.Valid { + errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) + } else if lcv.VUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of VUs shouldn't be negative")) + } + + if !lcv.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(lcv.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, lcv.Duration, + )) + } + + return errors +} + +// Split divides the VUS as best it can, but keeps the same duration +func (lcv ConstantLoopingVUsConfig) Split(percentages []float64) ([]Config, error) { + if err := checkPercentagesSum(percentages); err != nil { + return nil, err + } + configs := make([]Config, len(percentages)) + for i, p := range percentages { + //TODO: figure out a better approach for the proportional distribution + // of the VUs (which are indivisible items)... + // Some sort of "pick closest match to percentage and adjust remaining"? 
+ configs[i] = &ConstantLoopingVUsConfig{ + BaseConfig: *lcv.BaseConfig.CopyWithPercentage(p), + VUs: null.IntFrom(int64(float64(lcv.VUs.Int64) / p)), + Duration: lcv.Duration, + } + } + return configs, nil +} diff --git a/lib/scheduler/helpers.go b/lib/scheduler/helpers.go new file mode 100644 index 00000000000..339e1bcb74b --- /dev/null +++ b/lib/scheduler/helpers.go @@ -0,0 +1,28 @@ +package scheduler + +import ( + "fmt" + "math" + "strings" +) + +// A helper function to verify percentage distributions +func checkPercentagesSum(percentages []float64) error { + var sum float64 + for _, v := range percentages { + sum += v + } + if math.Abs(100-sum) >= minPercentage { + return fmt.Errorf("split percentage sum is %.2f while it should be 100", sum) + } + return nil +} + +// A helper function for joining error messages into a single string +func concatErrors(errors []error, separator string) string { + errStrings := make([]string, len(errors)) + for i, e := range errors { + errStrings[i] = e.Error() + } + return strings.Join(errStrings, separator) +} diff --git a/lib/scheduler/interfaces.go b/lib/scheduler/interfaces.go new file mode 100644 index 00000000000..316233bba6a --- /dev/null +++ b/lib/scheduler/interfaces.go @@ -0,0 +1,16 @@ +package scheduler + +// Config is an interface that should be implemented by all scheduler config types +type Config interface { + GetBaseConfig() BaseConfig + Validate() []error + // TODO: Split(percentages []float64) ([]Config, error) + // TODO: GetMaxVUs() int64 + // TODO: GetMaxDuration() time.Duration // inclusind max timeouts, if we want to share VUs between schedulers in the future? 
+} + +// Scheduler is an interface that should be implemented by all scheduler implementations +type Scheduler interface { + Initialize(Config) error + Start() +} diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go new file mode 100644 index 00000000000..30bde87f222 --- /dev/null +++ b/lib/scheduler/per_vu_iterations.go @@ -0,0 +1,54 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const perVUIterationsType = "per-vu-iterations" + +// PerVUIteationsConfig stores the number of VUs iterations, as well as maxDuration settings +type PerVUIteationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewPerVUIterationsConfig returns a PerVUIteationsConfig with default values +func NewPerVUIterationsConfig(name string) PerVUIteationsConfig { + return PerVUIteationsConfig{ + BaseConfig: NewBaseConfig(name, perVUIterationsType, false), + MaxDuration: types.NewNullDuration(1*time.Hour, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &PerVUIteationsConfig{} + +// Validate makes sure all options are configured and valid +func (pvic PerVUIteationsConfig) Validate() []error { + errors := pvic.BaseConfig.Validate() + if !pvic.VUs.Valid { + errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) + } else if pvic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if !pvic.Iterations.Valid { + errors = append(errors, fmt.Errorf("the number of iterations isn't specified")) + } else if pvic.Iterations.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of iterations should be more than 0")) + } + + if time.Duration(pvic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", 
minDuration, pvic.MaxDuration, + )) + } + + return errors +} diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go new file mode 100644 index 00000000000..2d29453980e --- /dev/null +++ b/lib/scheduler/shared_iterations.go @@ -0,0 +1,54 @@ +package scheduler + +import ( + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const sharedIterationsType = "shared-iterations" + +// SharedIteationsConfig stores the number of VUs iterations, as well as maxDuration settings +type SharedIteationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewSharedIterationsConfig returns a SharedIteationsConfig with default values +func NewSharedIterationsConfig(name string) SharedIteationsConfig { + return SharedIteationsConfig{ + BaseConfig: NewBaseConfig(name, sharedIterationsType, false), + MaxDuration: types.NewNullDuration(1*time.Hour, false), + } +} + +// Make sure we implement the Config interface +var _ Config = &SharedIteationsConfig{} + +// Validate makes sure all options are configured and valid +func (sic SharedIteationsConfig) Validate() []error { + errors := sic.BaseConfig.Validate() + if !sic.VUs.Valid { + errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) + } else if sic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if !sic.Iterations.Valid { + errors = append(errors, fmt.Errorf("the number of iterations isn't specified")) + } else if sic.Iterations.Int64 < sic.VUs.Int64 { + errors = append(errors, fmt.Errorf("the number of iterations shouldn't be less than the number of VUs")) + } + + if time.Duration(sic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", minDuration, sic.MaxDuration, + )) + } + + return errors +} diff --git 
a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go new file mode 100644 index 00000000000..cc85cfdc334 --- /dev/null +++ b/lib/scheduler/variable_arrival_rate.go @@ -0,0 +1,68 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const variableArrivalRateType = "variable-arrival-rate" + +// VariableArrivalRateConfig stores config for the variable arrival-rate scheduler +type VariableArrivalRateConfig struct { + BaseConfig + StartRate null.Int `json:"startRate"` + TimeUnit types.NullDuration `json:"timeUnit"` //TODO: rename to something else? + Stages json.RawMessage `json:"stages"` //TODO: figure out some equivalent to stages? + //TODO: maybe move common parts between this and the ConstantArrivalRateConfig in another struct? + + // Initialize `PreAllocatedVUs` numeber of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the scheduler will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewVariableArrivalRateConfig returns a VariableArrivalRateConfig with default values +func NewVariableArrivalRateConfig(name string) VariableArrivalRateConfig { + return VariableArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, variableArrivalRateType, false), + TimeUnit: types.NewNullDuration(1*time.Second, false), + //TODO: set some default values for PreAllocatedVUs and MaxVUs? 
+ } +} + +// Make sure we implement the Config interface +var _ Config = &VariableArrivalRateConfig{} + +// Validate makes sure all options are configured and valid +func (varc VariableArrivalRateConfig) Validate() []error { + errors := varc.BaseConfig.Validate() + + if varc.StartRate.Int64 < 0 { + errors = append(errors, fmt.Errorf("the startRate value shouldn't be negative")) + } + + if time.Duration(varc.TimeUnit.Duration) < 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + //TODO: stages valiadtion + + if !varc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if varc.PreAllocatedVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !varc.MaxVUs.Valid { + errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) + } else if varc.MaxVUs.Int64 < varc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go new file mode 100644 index 00000000000..7859bce5354 --- /dev/null +++ b/lib/scheduler/variable_looping_vus.go @@ -0,0 +1,59 @@ +package scheduler + +import ( + "fmt" + + "github.com/loadimpact/k6/lib/types" + null "gopkg.in/guregu/null.v3" +) + +const variableLoopingVUsType = "variable-looping-vus" + +// Stage contains +type Stage struct { + Duration types.NullDuration `json:"duration"` + Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? 
+} + +// VariableLoopingVUsConfig stores the configuration for the stages scheduler +type VariableLoopingVUsConfig struct { + BaseConfig + StartVUs null.Int `json:"startVUs"` + Stages []Stage `json:"stages"` +} + +// NewVariableLoopingVUsConfig returns a VariableLoopingVUsConfig with its default values +func NewVariableLoopingVUsConfig(name string) VariableLoopingVUsConfig { + return VariableLoopingVUsConfig{BaseConfig: NewBaseConfig(name, variableLoopingVUsType, false)} +} + +// Make sure we implement the Config interface +var _ Config = &VariableLoopingVUsConfig{} + +// Validate makes sure all options are configured and valid +func (ls VariableLoopingVUsConfig) Validate() []error { + errors := ls.BaseConfig.Validate() + if ls.StartVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of start VUs shouldn't be negative")) + } + + if len(ls.Stages) == 0 { + errors = append(errors, fmt.Errorf("at least one stage has to be specified")) + } else { + for i, s := range ls.Stages { + stageNum := i + 1 + if !s.Duration.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) + } else if s.Duration.Duration < 0 { + errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) + } + if !s.Target.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) + } else if s.Target.Int64 < 0 { + errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) + } + } + } + + return errors +} From 15a412899d1b52a995caac25ee573a1e7c59ee72 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 30 Jan 2019 09:30:47 +0200 Subject: [PATCH 02/36] Fix minor issues linting and test issues --- cmd/run.go | 3 --- lib/options.go | 2 +- lib/scheduler/constant_arrival_rate.go | 2 +- lib/scheduler/variable_arrival_rate.go | 2 +- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index f51497d9a2c..d5adc0d2798 100644 
--- a/cmd/run.go +++ b/cmd/run.go @@ -37,8 +37,6 @@ import ( "syscall" "time" - "github.com/davecgh/go-spew/spew" - "github.com/loadimpact/k6/api" "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" @@ -169,7 +167,6 @@ a commandline interface for interacting with it.`, } //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method - spew.Dump(conf.Options.Execution) if errList := conf.Validate(); len(errList) != 0 { errMsg := []string{ fmt.Sprintf("There were %d errors with the script options:", len(errList)), diff --git a/lib/options.go b/lib/options.go index 20e3f29961d..fbaf61c865f 100644 --- a/lib/options.go +++ b/lib/options.go @@ -195,7 +195,7 @@ type Options struct { Iterations null.Int `json:"iterations" envconfig:"iterations"` Stages []Stage `json:"stages" envconfig:"stages"` - Execution scheduler.ConfigMap `json:"execution" envconfig:"execution"` + Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"execution"` // Timeouts for the setup() and teardown() functions SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"setup_timeout"` diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go index c2dd0885204..781a115e3fd 100644 --- a/lib/scheduler/constant_arrival_rate.go +++ b/lib/scheduler/constant_arrival_rate.go @@ -17,7 +17,7 @@ type ConstantArrivalRateConfig struct { TimeUnit types.NullDuration `json:"timeUnit"` //TODO: rename to something else? 
Duration types.NullDuration `json:"duration"` - // Initialize `PreAllocatedVUs` numeber of VUs, and if more than that are needed, + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, // they will be dynamically allocated, until `MaxVUs` is reached, which is an // absolutely hard limit on the number of VUs the scheduler will use PreAllocatedVUs null.Int `json:"preAllocatedVUs"` diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go index cc85cfdc334..ec8b8bd5a56 100644 --- a/lib/scheduler/variable_arrival_rate.go +++ b/lib/scheduler/variable_arrival_rate.go @@ -19,7 +19,7 @@ type VariableArrivalRateConfig struct { Stages json.RawMessage `json:"stages"` //TODO: figure out some equivalent to stages? //TODO: maybe move common parts between this and the ConstantArrivalRateConfig in another struct? - // Initialize `PreAllocatedVUs` numeber of VUs, and if more than that are needed, + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, // they will be dynamically allocated, until `MaxVUs` is reached, which is an // absolutely hard limit on the number of VUs the scheduler will use PreAllocatedVUs null.Int `json:"preAllocatedVUs"` From 4a1c941dc5cbb31b71aa6c2b1e797d8488cbb9ec Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 31 Jan 2019 11:24:12 +0200 Subject: [PATCH 03/36] Break apart the scheduler config type initializations --- lib/scheduler/configmap.go | 90 +++++++++++++++++--------- lib/scheduler/constant_arrival_rate.go | 9 +++ lib/scheduler/constant_looping_vus.go | 9 +++ lib/scheduler/per_vu_iterations.go | 9 +++ lib/scheduler/shared_iterations.go | 9 +++ lib/scheduler/variable_arrival_rate.go | 8 +++ lib/scheduler/variable_looping_vus.go | 9 +++ 7 files changed, 113 insertions(+), 30 deletions(-) diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go index b97b2f3f569..83dad848855 100644 --- a/lib/scheduler/configmap.go +++ 
b/lib/scheduler/configmap.go @@ -3,45 +3,75 @@ package scheduler import ( "encoding/json" "fmt" + "sync" ) +// ConfigMap can contain mixed scheduler config types +type ConfigMap map[string]Config + +// ConfigConstructor is a simple function that returns a concrete Config instance +// with the specified name and all default values correctly initialized +type ConfigConstructor func(name string, rawJSON []byte) (Config, error) + +var ( + configTypesMutex sync.RWMutex + configConstructors = make(map[string]ConfigConstructor) +) + +// RegisterConfigType adds the supplied ConfigConstructor as the constructor for its +// type in the configConstructors map, in a thread-safe manner +func RegisterConfigType(configType string, constructor ConfigConstructor) { + configTypesMutex.Lock() + defer configTypesMutex.Unlock() + + if constructor == nil { + panic("scheduler configs: constructor is nil") + } + if _, configTypeExists := configConstructors[configType]; configTypeExists { + panic("scheduler configs: RegisterConfigType called twice for " + configType) + } + + configConstructors[configType] = constructor +} + // GetParsedConfig returns a struct instance corresponding to the supplied // config type. 
It will be fully initialized - with both the default values of // the type, as well as with whatever the user had specified in the JSON func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, err error) { - switch configType { - case constantLoopingVUsType: - config := NewConstantLoopingVUsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case variableLoopingVUsType: - config := NewVariableLoopingVUsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case sharedIterationsType: - config := NewSharedIterationsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case perVUIterationsType: - config := NewPerVUIterationsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case constantArrivalRateType: - config := NewConstantArrivalRateConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case variableArrivalRateType: - config := NewVariableArrivalRateConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - default: + configTypesMutex.Lock() + defer configTypesMutex.Unlock() + + constructor, exists := configConstructors[configType] + if !exists { return nil, fmt.Errorf("unknown execution scheduler type '%s'", configType) } - return -} + return constructor(name, rawJSON) + /* + switch configType { + case constantLoopingVUsType: + config := NewConstantLoopingVUsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case variableLoopingVUsType: + config := NewVariableLoopingVUsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case sharedIterationsType: + config := NewSharedIterationsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case perVUIterationsType: + config := NewPerVUIterationsConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + case constantArrivalRateType: -// ConfigMap can contain mixed scheduler config types -type ConfigMap 
map[string]Config + case variableArrivalRateType: + config := NewVariableArrivalRateConfig(name) + err = json.Unmarshal(rawJSON, &config) + result = config + */ +} // UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, // creating the correct type of configs based on the `type` property. diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go index 781a115e3fd..6319af0753a 100644 --- a/lib/scheduler/constant_arrival_rate.go +++ b/lib/scheduler/constant_arrival_rate.go @@ -1,6 +1,7 @@ package scheduler import ( + "encoding/json" "fmt" "time" @@ -10,6 +11,14 @@ import ( const constantArrivalRateType = "constant-arrival-rate" +func init() { + RegisterConfigType(constantArrivalRateType, func(name string, rawJSON []byte) (Config, error) { + config := NewConstantArrivalRateConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // ConstantArrivalRateConfig stores config for the constant arrival-rate scheduler type ConstantArrivalRateConfig struct { BaseConfig diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index 3de16d724e5..5b55ed9a8fc 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -1,6 +1,7 @@ package scheduler import ( + "encoding/json" "fmt" "time" @@ -10,6 +11,14 @@ import ( const constantLoopingVUsType = "constant-looping-vus" +func init() { + RegisterConfigType(constantLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { + config := NewConstantLoopingVUsConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // The minimum duration we'll allow users to schedule. 
This doesn't affect the stages // configuration, where 0-duration virtual stages are allowed for instantaneous VU jumps const minDuration = 1 * time.Second diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go index 30bde87f222..8d69027f1ac 100644 --- a/lib/scheduler/per_vu_iterations.go +++ b/lib/scheduler/per_vu_iterations.go @@ -1,6 +1,7 @@ package scheduler import ( + "encoding/json" "fmt" "time" @@ -10,6 +11,14 @@ import ( const perVUIterationsType = "per-vu-iterations" +func init() { + RegisterConfigType(perVUIterationsType, func(name string, rawJSON []byte) (Config, error) { + config := NewPerVUIterationsConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // PerVUIteationsConfig stores the number of VUs iterations, as well as maxDuration settings type PerVUIteationsConfig struct { BaseConfig diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go index 2d29453980e..998bca64096 100644 --- a/lib/scheduler/shared_iterations.go +++ b/lib/scheduler/shared_iterations.go @@ -1,6 +1,7 @@ package scheduler import ( + "encoding/json" "fmt" "time" @@ -10,6 +11,14 @@ import ( const sharedIterationsType = "shared-iterations" +func init() { + RegisterConfigType(sharedIterationsType, func(name string, rawJSON []byte) (Config, error) { + config := NewSharedIterationsConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // SharedIteationsConfig stores the number of VUs iterations, as well as maxDuration settings type SharedIteationsConfig struct { BaseConfig diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go index ec8b8bd5a56..0acf90163e7 100644 --- a/lib/scheduler/variable_arrival_rate.go +++ b/lib/scheduler/variable_arrival_rate.go @@ -11,6 +11,14 @@ import ( const variableArrivalRateType = "variable-arrival-rate" +func init() { + RegisterConfigType(variableArrivalRateType, func(name string, rawJSON []byte) 
(Config, error) { + config := NewVariableArrivalRateConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // VariableArrivalRateConfig stores config for the variable arrival-rate scheduler type VariableArrivalRateConfig struct { BaseConfig diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go index 7859bce5354..d744c2c9f3a 100644 --- a/lib/scheduler/variable_looping_vus.go +++ b/lib/scheduler/variable_looping_vus.go @@ -1,6 +1,7 @@ package scheduler import ( + "encoding/json" "fmt" "github.com/loadimpact/k6/lib/types" @@ -9,6 +10,14 @@ import ( const variableLoopingVUsType = "variable-looping-vus" +func init() { + RegisterConfigType(variableLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { + config := NewVariableLoopingVUsConfig(name) + err := json.Unmarshal(rawJSON, &config) + return config, err + }) +} + // Stage contains type Stage struct { Duration types.NullDuration `json:"duration"` From b50ff1831c4aa6f90dbda5b2addafa398ddd447f Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 31 Jan 2019 11:28:54 +0200 Subject: [PATCH 04/36] Clean up some commented-out code --- lib/scheduler/configmap.go | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go index 83dad848855..2e616828cc0 100644 --- a/lib/scheduler/configmap.go +++ b/lib/scheduler/configmap.go @@ -46,31 +46,6 @@ func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, er return nil, fmt.Errorf("unknown execution scheduler type '%s'", configType) } return constructor(name, rawJSON) - /* - switch configType { - case constantLoopingVUsType: - config := NewConstantLoopingVUsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case variableLoopingVUsType: - config := NewVariableLoopingVUsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case sharedIterationsType: - config := 
NewSharedIterationsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case perVUIterationsType: - config := NewPerVUIterationsConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - case constantArrivalRateType: - - case variableArrivalRateType: - config := NewVariableArrivalRateConfig(name) - err = json.Unmarshal(rawJSON, &config) - result = config - */ } // UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, From 4821c508fc2d7d2417c4baa04b9f92b2775ba59d Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 25 Feb 2019 23:29:26 +0200 Subject: [PATCH 05/36] Update and test the new scheduler configurations This should also warn users that use combinations of executions settings which are going to be deprecated. --- README.md | 1 - cmd/config.go | 71 ++++++++- cmd/config_test.go | 5 + cmd/run.go | 14 +- lib/options.go | 35 ++++- lib/options_test.go | 12 ++ lib/scheduler/base_config.go | 22 +-- lib/scheduler/configmap.go | 14 +- lib/scheduler/constant_arrival_rate.go | 23 ++- lib/scheduler/constant_looping_vus.go | 19 ++- lib/scheduler/helpers.go | 40 +++++ lib/scheduler/interfaces.go | 15 +- lib/scheduler/per_vu_iterations.go | 18 ++- lib/scheduler/schedulers_test.go | 210 +++++++++++++++++++++++++ lib/scheduler/shared_iterations.go | 33 ++-- lib/scheduler/variable_arrival_rate.go | 29 +++- lib/scheduler/variable_looping_vus.go | 50 +++--- 17 files changed, 523 insertions(+), 88 deletions(-) create mode 100644 lib/scheduler/schedulers_test.go diff --git a/README.md b/README.md index 41e34c51879..db33ee02099 100644 --- a/README.md +++ b/README.md @@ -197,7 +197,6 @@ Configuration mechanisms do have an order of precedence. As presented, options a As shown above, there are several ways to configure the number of simultaneous virtual users k6 will launch. There are also different ways to specify how long those virtual users will be running. 
For simple tests you can: - Set the test duration by the `--duration`/`-d` CLI flag (or the `K6_DURATION` environment variable and the `duration` script/JSON option). For ease of use, `duration` is specified with human readable values like `1h30m10s` - `k6 run --duration 30s script.js`, `k6 cloud -d 15m10s script.js`, `export K6_DURATION=1h`, etc. If set to `0`, k6 wouldn't stop executing the script unless the user manually stops it. - Set the total number of script iterations with the `--iterations`/`-i` CLI flag (or the `K6_ITERATIONS` environment variable and the `iterations` script/JSON option). k6 will stop executing the script whenever the **total** number of iterations (i.e. the number of iterations across all VUs) reaches the specified number. So if you have `k6 run --iterations 10 --vus 10 script.js`, then each VU would make only a single iteration. -- Set both the test duration and the total number of script iterations. In that case, k6 would stop the script execution whenever either one of the above conditions is reached first. For more complex cases, you can specify execution stages. They are a combination of `duration,target-VUs` pairs. These pairs instruct k6 to linearly ramp up, ramp down, or stay at the number of VUs specified for the period specified. Execution stages can be set via the `stages` script/JSON option as an array of `{ duration: ..., target: ... }` pairs, or with the `--stage`/`-s` CLI flags and the `K6_STAGE` environment variable via the `duration:target,duration:target...` syntax. 
diff --git a/cmd/config.go b/cmd/config.go index 5eb2bd4d6a7..d8a1f59fb4c 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -27,10 +27,13 @@ import ( "github.com/kelseyhightower/envconfig" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/stats/cloud" "github.com/loadimpact/k6/stats/influxdb" "github.com/loadimpact/k6/stats/kafka" + "github.com/pkg/errors" "github.com/shibukawa/configdir" + log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/pflag" null "gopkg.in/guregu/null.v3" @@ -171,6 +174,71 @@ func readEnvConfig() (conf Config, err error) { return conf, nil } +// This checks for conflicting options and turns any shortcut options (i.e. duration, iterations, +// stages) into the proper scheduler configuration +func buildExecutionConfig(conf Config) (Config, error) { + result := conf + if conf.Duration.Valid { + if conf.Iterations.Valid { + //TODO: make this an error in the next version + log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions") + } + + if conf.Stages != nil { + //TODO: make this an error in the next version + log.Warnf("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions") + } + + if conf.Execution != nil { + return result, errors.New("specifying both duration and execution is not supported") + } + + ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) + ds.VUs = conf.VUs + ds.Duration = conf.Duration + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } else if conf.Stages != nil { + if conf.Iterations.Valid { + //TODO: make this an error in the next version + log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") + } + + if conf.Execution != nil { + return conf, errors.New("specifying both stages and execution is not supported") + } + + ds := 
scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName) + ds.StartVUs = conf.VUs + for _, s := range conf.Stages { + if s.Duration.Valid { + ds.Stages = append(ds.Stages, scheduler.Stage{Duration: s.Duration, Target: s.Target}) + } + } + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } else if conf.Iterations.Valid || conf.Execution == nil { + // Either shared iterations were explicitly specified via the shortcut option, or no execution + // parameters were specified in any way, which will run the default 1 iteration in 1 VU + if conf.Iterations.Valid && conf.Execution != nil { + return conf, errors.New("specifying both iterations and execution is not supported") + } + + ds := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName) + ds.VUs = conf.VUs + if conf.Iterations.Valid { // TODO: fix where the default iterations value is set... sigh... + ds.Iterations = conf.Iterations + } + + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } + + //TODO: validate the config; questions: + // - separately validate the duration, iterations and stages for better error messages? + // - or reuse the execution validation somehow, at the end? or something mixed? + // - here or in getConsolidatedConfig() or somewhere else? + + return result, nil +} + // Assemble the final consolidated configuration from all of the different sources: // - start with the CLI-provided options to get shadowed (non-Valid) defaults in there // - add the global file config options @@ -179,6 +247,7 @@ func readEnvConfig() (conf Config, err error) { // - merge the user-supplied CLI flags back in on top, to give them the greatest priority // - set some defaults if they weren't previously specified // TODO: add better validation, more explicit default values and improve consistency between formats +// TODO: accumulate all errors and differentiate between the layers? 
func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf Config, err error) { cliConf.Collectors.InfluxDB = influxdb.NewConfig().Apply(cliConf.Collectors.InfluxDB) cliConf.Collectors.Cloud = cloud.NewConfig().Apply(cliConf.Collectors.Cloud) @@ -199,5 +268,5 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf } conf = conf.Apply(envConf).Apply(cliConf) - return conf, nil + return buildExecutionConfig(conf) } diff --git a/cmd/config_test.go b/cmd/config_test.go index 7d7be3187f7..a82daed850f 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -134,3 +134,8 @@ func TestConfigApply(t *testing.T) { assert.Equal(t, []string{"influxdb", "json"}, conf.Out) }) } + +func TestBuildExecutionConfig(t *testing.T) { + //TODO: test the current config building and constructing of the execution plan, and the emitted warnings + //TODO: test the future full overwriting of the duration/iterations/stages/execution options +} diff --git a/cmd/run.go b/cmd/run.go index d5adc0d2798..81b6b157b3e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -37,6 +37,8 @@ import ( "syscall" "time" + "github.com/davecgh/go-spew/spew" + "github.com/loadimpact/k6/api" "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" @@ -61,7 +63,7 @@ const ( teardownTimeoutErrorCode = 101 genericTimeoutErrorCode = 102 genericEngineErrorCode = 103 - invalidOptionsErrorCode = 104 + //invalidOptionsErrorCode = 104 ) var ( @@ -167,14 +169,16 @@ a commandline interface for interacting with it.`, } //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method + spew.Dump(conf.Execution) if errList := conf.Validate(); len(errList) != 0 { - errMsg := []string{ - fmt.Sprintf("There were %d errors with the script options:", len(errList)), - } + + errMsg := []string{"There were problems with the specified script configuration:"} for _, err := range errList { errMsg = append(errMsg, fmt.Sprintf("\t- %s", err.Error())) } - 
return ExitCode{errors.New(strings.Join(errMsg, "\n")), invalidOptionsErrorCode} + //TODO: re-enable exiting with validation errors + log.Warn(errors.New(strings.Join(errMsg, "\n"))) + //return ExitCode{errors.New(strings.Join(errMsg, "\n")), invalidOptionsErrorCode} } // If summary trend stats are defined, update the UI to reflect them diff --git a/lib/options.go b/lib/options.go index fbaf61c865f..45b38ee4034 100644 --- a/lib/options.go +++ b/lib/options.go @@ -34,6 +34,11 @@ import ( "gopkg.in/guregu/null.v3" ) +// DefaultSchedulerName is used as the default key/ID of the scheduler config entries +// that were created due to the use of the shortcut execution control options (i.e. duration+vus, +// iterations+vus, or stages) +const DefaultSchedulerName = "default" + // DefaultSystemTagList includes all of the system tags emitted with metrics by default. // Other tags that are not enabled by default include: iter, vu, ocsp_status, ip var DefaultSystemTagList = []string{ @@ -189,7 +194,9 @@ type Options struct { // Initial values for VUs, max VUs, duration cap, iteration cap, and stages. // See the Runner or Executor interfaces for more information. - VUs null.Int `json:"vus" envconfig:"vus"` + VUs null.Int `json:"vus" envconfig:"vus"` + + //TODO: deprecate this? or reuse it in the manual control "scheduler"? 
VUsMax null.Int `json:"vusMax" envconfig:"vus_max"` Duration types.NullDuration `json:"duration" envconfig:"duration"` Iterations null.Int `json:"iterations" envconfig:"iterations"` @@ -296,6 +303,23 @@ func (o Options) Apply(opts Options) Options { if opts.VUsMax.Valid { o.VUsMax = opts.VUsMax } + + // Specifying duration, iterations, stages, or execution in a "higher" config tier + // will overwrite all of the the previous execution settings (if any) from any + // "lower" config tiers + // Still, if more than one of those options is simultaneously specified in the same + // config tier, they will be preserved, so the validation after we've consolidated + // all of the options can return an error. + if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || o.Execution != nil { + //TODO: uncomment this after we start using the new schedulers + /* + o.Duration = types.NewNullDuration(0, false) + o.Iterations = null.NewInt(0, false) + o.Stages = nil + o.Execution = nil + */ + } + if opts.Duration.Valid { o.Duration = opts.Duration } @@ -310,12 +334,13 @@ func (o Options) Apply(opts Options) Options { } } } - - //TODO: handle o.Execution overwriting by plain vus/iterations/duration/stages options - if len(opts.Execution) > 0 { + // o.Execution can also be populated by the duration/iterations/stages config shortcuts, but + // that happens after the configuration from the different sources is consolidated. It can't + // happen here, because something like `K6_ITERATIONS=10 k6 run --vus 5 script.js` wont't + // work correctly at this level. 
+ if opts.Execution != nil { o.Execution = opts.Execution } - if opts.SetupTimeout.Valid { o.SetupTimeout = opts.SetupTimeout } diff --git a/lib/options_test.go b/lib/options_test.go index 0e78c38cfbf..1129a88378a 100644 --- a/lib/options_test.go +++ b/lib/options_test.go @@ -30,6 +30,7 @@ import ( "time" "github.com/kelseyhightower/envconfig" + "github.com/loadimpact/k6/lib/scheduler" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" @@ -86,6 +87,17 @@ func TestOptions(t *testing.T) { assert.Equal(t, oneStage, opts.Apply(Options{Stages: oneStage}).Stages) assert.Equal(t, oneStage, Options{}.Apply(opts).Apply(Options{Stages: oneStage}).Apply(Options{Stages: oneStage}).Stages) }) + t.Run("Execution", func(t *testing.T) { + sched := scheduler.NewConstantLoopingVUsConfig("test") + sched.VUs = null.IntFrom(123) + sched.Duration = types.NullDurationFrom(3 * time.Minute) + opts := Options{}.Apply(Options{Execution: scheduler.ConfigMap{"test": sched}}) + cs, ok := opts.Execution["test"].(scheduler.ConstantLoopingVUsConfig) + assert.True(t, ok) + assert.Equal(t, int64(123), cs.VUs.Int64) + assert.Equal(t, "3m0s", cs.Duration.String()) + }) + //TODO: test that any execution option overwrites any other lower-level options t.Run("RPS", func(t *testing.T) { opts := Options{}.Apply(Options{RPS: null.IntFrom(12345)}) assert.True(t, opts.RPS.Valid) diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go index 9aa5f9b4c26..36253cd926c 100644 --- a/lib/scheduler/base_config.go +++ b/lib/scheduler/base_config.go @@ -21,15 +21,12 @@ type BaseConfig struct { Interruptible null.Bool `json:"interruptible"` IterationTimeout types.NullDuration `json:"iterationTimeout"` Env map[string]string `json:"env"` - Exec string `json:"exec"` // function name, externally validated + Exec null.String `json:"exec"` // function name, externally validated Percentage float64 `json:"-"` // 100, unless Split() was called - // Future 
extensions: tags, distribution, others? + //TODO: future extensions like tags, distribution, others? } -// Make sure we implement the Config interface, even with the BaseConfig! -var _ Config = &BaseConfig{} - // NewBaseConfig returns a default base config with the default values func NewBaseConfig(name, configType string, interruptible bool) BaseConfig { return BaseConfig{ @@ -56,6 +53,9 @@ func (bc BaseConfig) Validate() (errors []error) { "percentage should be between %f and 100, but is %f", minPercentage, bc.Percentage, )) } + if bc.Exec.Valid && bc.Exec.String == "" { + errors = append(errors, fmt.Errorf("exec value cannot be empty")) + } // The actually reasonable checks: if bc.StartTime.Valid && bc.StartTime.Duration < 0 { errors = append(errors, fmt.Errorf("scheduler start time should be positive")) @@ -81,15 +81,3 @@ func (bc BaseConfig) CopyWithPercentage(percentage float64) *BaseConfig { c.Percentage = percentage return &c } - -// Split splits the BaseConfig with the accurate percentages -func (bc BaseConfig) Split(percentages []float64) ([]Config, error) { - if err := checkPercentagesSum(percentages); err != nil { - return nil, err - } - configs := make([]Config, len(percentages)) - for i, p := range percentages { - configs[i] = bc.CopyWithPercentage(p) - } - return configs, nil -} diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go index 2e616828cc0..1f71b16f1c5 100644 --- a/lib/scheduler/configmap.go +++ b/lib/scheduler/configmap.go @@ -50,9 +50,19 @@ func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, er // UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, // creating the correct type of configs based on the `type` property. 
-func (scs *ConfigMap) UnmarshalJSON(b []byte) error { +func (scs *ConfigMap) UnmarshalJSON(data []byte) error { + if len(data) == 0 { + return nil + } + + if len(data) == 4 && string(data) == "null" { + return nil + } + + //TODO: use a more sophisticated combination of dec.Token() and dec.More(), + // which would allow us to support both arrays and maps for this config? var protoConfigs map[string]protoConfig - if err := json.Unmarshal(b, &protoConfigs); err != nil { + if err := strictJSONUnmarshal(data, &protoConfigs); err != nil { return err } diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go index 6319af0753a..21dc3db7184 100644 --- a/lib/scheduler/constant_arrival_rate.go +++ b/lib/scheduler/constant_arrival_rate.go @@ -1,7 +1,6 @@ package scheduler import ( - "encoding/json" "fmt" "time" @@ -14,7 +13,7 @@ const constantArrivalRateType = "constant-arrival-rate" func init() { RegisterConfigType(constantArrivalRateType, func(name string, rawJSON []byte) (Config, error) { config := NewConstantArrivalRateConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -23,7 +22,7 @@ func init() { type ConstantArrivalRateConfig struct { BaseConfig Rate null.Int `json:"rate"` - TimeUnit types.NullDuration `json:"timeUnit"` //TODO: rename to something else? + TimeUnit types.NullDuration `json:"timeUnit"` Duration types.NullDuration `json:"duration"` // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, @@ -38,7 +37,6 @@ func NewConstantArrivalRateConfig(name string) ConstantArrivalRateConfig { return ConstantArrivalRateConfig{ BaseConfig: NewBaseConfig(name, constantArrivalRateType, false), TimeUnit: types.NewNullDuration(1*time.Second, false), - //TODO: set some default values for PreAllocatedVUs and MaxVUs? 
} } @@ -54,7 +52,7 @@ func (carc ConstantArrivalRateConfig) Validate() []error { errors = append(errors, fmt.Errorf("the iteration rate should be positive")) } - if time.Duration(carc.TimeUnit.Duration) < 0 { + if time.Duration(carc.TimeUnit.Duration) <= 0 { errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) } @@ -80,3 +78,18 @@ func (carc ConstantArrivalRateConfig) Validate() []error { return errors } + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (carc ConstantArrivalRateConfig) GetMaxVUs() int64 { + return carc.MaxVUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (carc ConstantArrivalRateConfig) GetMaxDuration() time.Duration { + maxDuration := carc.Duration.Duration + if !carc.Interruptible.Bool { + maxDuration += carc.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index 5b55ed9a8fc..29cc5f2afa1 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -1,7 +1,6 @@ package scheduler import ( - "encoding/json" "fmt" "time" @@ -14,7 +13,7 @@ const constantLoopingVUsType = "constant-looping-vus" func init() { RegisterConfigType(constantLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { config := NewConstantLoopingVUsConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -32,7 +31,6 @@ type ConstantLoopingVUsConfig struct { // NewConstantLoopingVUsConfig returns a ConstantLoopingVUsConfig with default values func NewConstantLoopingVUsConfig(name string) ConstantLoopingVUsConfig { - //TODO: decide if we want interruptible or uninterruptible iterations here? 
return ConstantLoopingVUsConfig{BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false)} } @@ -59,6 +57,21 @@ func (lcv ConstantLoopingVUsConfig) Validate() []error { return errors } +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (lcv ConstantLoopingVUsConfig) GetMaxVUs() int64 { + return lcv.VUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (lcv ConstantLoopingVUsConfig) GetMaxDuration() time.Duration { + maxDuration := lcv.Duration.Duration + if !lcv.Interruptible.Bool { + maxDuration += lcv.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} + // Split divides the VUS as best it can, but keeps the same duration func (lcv ConstantLoopingVUsConfig) Split(percentages []float64) ([]Config, error) { if err := checkPercentagesSum(percentages); err != nil { diff --git a/lib/scheduler/helpers.go b/lib/scheduler/helpers.go index 339e1bcb74b..e85d622a9c5 100644 --- a/lib/scheduler/helpers.go +++ b/lib/scheduler/helpers.go @@ -1,6 +1,8 @@ package scheduler import ( + "bytes" + "encoding/json" "fmt" "math" "strings" @@ -26,3 +28,41 @@ func concatErrors(errors []error, separator string) string { } return strings.Join(errStrings, separator) } + +// Decode a JSON in a strict manner, emitting an error if there are unknown fields +func strictJSONUnmarshal(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DisallowUnknownFields() + dec.UseNumber() + + if err := dec.Decode(&v); err != nil { + return err + } + if dec.More() { + return fmt.Errorf("unexpected data after the JSON object") + } + return nil +} + +// A helper function to avoid code duplication +func validateStages(stages []Stage) []error { + var errors []error + if len(stages) == 0 { + errors = append(errors, fmt.Errorf("at least one stage has to be specified")) + } else { + for i, s := 
range stages { + stageNum := i + 1 + if !s.Duration.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) + } else if s.Duration.Duration < 0 { + errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) + } + if !s.Target.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) + } else if s.Target.Int64 < 0 { + errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) + } + } + } + return errors +} diff --git a/lib/scheduler/interfaces.go b/lib/scheduler/interfaces.go index 316233bba6a..71868160df1 100644 --- a/lib/scheduler/interfaces.go +++ b/lib/scheduler/interfaces.go @@ -1,16 +1,13 @@ package scheduler +import "time" + // Config is an interface that should be implemented by all scheduler config types type Config interface { GetBaseConfig() BaseConfig Validate() []error - // TODO: Split(percentages []float64) ([]Config, error) - // TODO: GetMaxVUs() int64 - // TODO: GetMaxDuration() time.Duration // inclusind max timeouts, if we want to share VUs between schedulers in the future? -} - -// Scheduler is an interface that should be implemented by all scheduler implementations -type Scheduler interface { - Initialize(Config) error - Start() + GetMaxVUs() int64 + GetMaxDuration() time.Duration // includes max timeouts, to allow us to share VUs between schedulers in the future + //TODO: Split(percentages []float64) ([]Config, error) + //TODO: String() or some other method that would be used for priting descriptions of the currently running schedulers for the UI? 
} diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go index 8d69027f1ac..e744b09b93c 100644 --- a/lib/scheduler/per_vu_iterations.go +++ b/lib/scheduler/per_vu_iterations.go @@ -1,7 +1,6 @@ package scheduler import ( - "encoding/json" "fmt" "time" @@ -14,7 +13,7 @@ const perVUIterationsType = "per-vu-iterations" func init() { RegisterConfigType(perVUIterationsType, func(name string, rawJSON []byte) (Config, error) { config := NewPerVUIterationsConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -61,3 +60,18 @@ func (pvic PerVUIteationsConfig) Validate() []error { return errors } + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (pvic PerVUIteationsConfig) GetMaxVUs() int64 { + return pvic.VUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (pvic PerVUIteationsConfig) GetMaxDuration() time.Duration { + maxDuration := pvic.MaxDuration.Duration + if !pvic.Interruptible.Bool { + maxDuration += pvic.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go new file mode 100644 index 00000000000..f6aa225df90 --- /dev/null +++ b/lib/scheduler/schedulers_test.go @@ -0,0 +1,210 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/loadimpact/k6/lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + null "gopkg.in/guregu/null.v3" +) + +type configMapTestCase struct { + rawJSON string + expectParseError bool + expectValidationError bool + customValidator func(t *testing.T, cm ConfigMap) +} + +var testCases = []configMapTestCase{ + {"", true, false, nil}, + {"1234", true, false, nil}, + {"asdf", true, false, nil}, + {"'adsf'", true, 
false, nil}, + {"[]", true, false, nil}, + {"{}", false, false, func(t *testing.T, cm ConfigMap) { + assert.Equal(t, cm, ConfigMap{}) + }}, + {"{}asdf", true, false, nil}, + {"null", false, false, func(t *testing.T, cm ConfigMap) { + assert.Nil(t, cm) + }}, + {`{"someKey": {}}`, true, false, nil}, + {`{"someKey": {"type": "constant-blah-blah", "vus": 10, "duration": "60s"}}`, true, false, nil}, + {`{"someKey": {"type": "constant-looping-vus", "uknownField": "should_error"}}`, true, false, nil}, + {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "env": 123}}`, true, false, nil}, + + // Validation errors for constant-looping-vus and the base config + {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, + "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc"}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewConstantLoopingVUsConfig("someKey") + sched.VUs = null.IntFrom(10) + sched.Duration = types.NullDurationFrom(1 * time.Minute) + sched.Interruptible = null.BoolFrom(false) + sched.IterationTimeout = types.NullDurationFrom(10 * time.Second) + sched.StartTime = types.NullDurationFrom(70 * time.Second) + sched.Exec = null.StringFrom("someFunc") + sched.Env = map[string]string{"test": "mest"} + require.Equal(t, cm, ConfigMap{"someKey": sched}) + require.Equal(t, sched.BaseConfig, cm["someKey"].GetBaseConfig()) + assert.Equal(t, 70*time.Second, cm["someKey"].GetMaxDuration()) + assert.Equal(t, int64(10), cm["someKey"].GetMaxVUs()) + assert.Empty(t, cm["someKey"].Validate()) + }}, + {`{"": {"type": "constant-looping-vus", "vus": 10, "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": 
"constant-looping-vus", "vus": -1, "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "0s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "startTime": "-10s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "exec": ""}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "iterationTimeout": "-2s"}}`, false, true, nil}, + + // variable-looping-vus + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 20, "iterationTimeout": "15s", + "stages": [{"duration": "60s", "target": 30}, {"duration": "120s", "target": 10}]}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewVariableLoopingVUsConfig("varloops") + sched.IterationTimeout = types.NullDurationFrom(15 * time.Second) + sched.StartVUs = null.IntFrom(20) + sched.Stages = []Stage{ + Stage{Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * time.Second)}, + Stage{Target: null.IntFrom(10), Duration: types.NullDurationFrom(120 * time.Second)}, + } + require.Equal(t, cm, ConfigMap{"varloops": sched}) + assert.Equal(t, int64(30), cm["varloops"].GetMaxVUs()) + assert.Equal(t, 195*time.Second, cm["varloops"].GetMaxDuration()) + assert.Empty(t, cm["varloops"].Validate()) + }}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 0, "stages": [{"duration": "60s", "target": 0}]}}`, false, false, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": -1, "stages": [{"duration": "60s", "target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "-60s", "target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "60s", "target": -30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": [{"duration": 
"60s"}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": [{"target": 30}]}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus", "stages": []}}`, false, true, nil}, + {`{"varloops": {"type": "variable-looping-vus"}}`, false, true, nil}, + + // shared-iterations + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewSharedIterationsConfig("ishared") + sched.Iterations = null.IntFrom(20) + sched.VUs = null.IntFrom(10) + require.Equal(t, cm, ConfigMap{"ishared": sched}) + assert.Equal(t, int64(10), cm["ishared"].GetMaxVUs()) + assert.Equal(t, 3630*time.Second, cm["ishared"].GetMaxDuration()) + assert.Empty(t, cm["ishared"].Validate()) + }}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "30m"}}`, false, false, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, + {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 30}}`, false, true, nil}, + + // per-vu-iterations + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewPerVUIterationsConfig("ipervu") + sched.Iterations = null.IntFrom(20) + sched.VUs = null.IntFrom(10) + require.Equal(t, cm, ConfigMap{"ipervu": sched}) + assert.Equal(t, int64(10), cm["ipervu"].GetMaxVUs()) + assert.Equal(t, 3630*time.Second, cm["ipervu"].GetMaxDuration()) + assert.Empty(t, cm["ipervu"].Validate()) + }}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 
10}}`, false, false, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "vus": 10}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, + + // constant-arrival-rate + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "timeUnit": "1m", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewConstantArrivalRateConfig("carrival") + sched.Rate = null.IntFrom(10) + sched.Duration = types.NullDurationFrom(10 * time.Minute) + sched.TimeUnit = types.NullDurationFrom(1 * time.Minute) + sched.PreAllocatedVUs = null.IntFrom(20) + sched.MaxVUs = null.IntFrom(30) + require.Equal(t, cm, ConfigMap{"carrival": sched}) + assert.Equal(t, int64(30), cm["carrival"].GetMaxVUs()) + assert.Equal(t, 630*time.Second, cm["carrival"].GetMaxDuration()) + assert.Empty(t, cm["carrival"].Validate()) + }}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, false, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30, "timeUnit": "-1s"}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "preAllocatedVUs": 20, "maxVUs": 30}}`, 
false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "0m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 0, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 15}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "0s", "preAllocatedVUs": 20, "maxVUs": 25}}`, false, true, nil}, + {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": -2, "maxVUs": 25}}`, false, true, nil}, + + // variable-arrival-rate + {`{"varrival": {"type": "variable-arrival-rate", "startRate": 10, "timeUnit": "30s", "preAllocatedVUs": 20, "maxVUs": 50, + "stages": [{"duration": "3m", "target": 30}, {"duration": "5m", "target": 10}]}}`, + false, false, func(t *testing.T, cm ConfigMap) { + sched := NewVariableArrivalRateConfig("varrival") + sched.StartRate = null.IntFrom(10) + sched.Stages = []Stage{ + Stage{Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)}, + Stage{Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)}, + } + sched.TimeUnit = types.NullDurationFrom(30 * time.Second) + sched.PreAllocatedVUs = null.IntFrom(20) + sched.MaxVUs = null.IntFrom(50) + require.Equal(t, cm, ConfigMap{"varrival": sched}) + assert.Equal(t, int64(50), cm["varrival"].GetMaxVUs()) + assert.Equal(t, 510*time.Second, cm["varrival"].GetMaxDuration()) + assert.Empty(t, cm["varrival"].Validate()) + }}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, false, nil}, + {`{"varrival": {"type": 
"variable-arrival-rate", "preAllocatedVUs": -20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "startRate": -1, "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": []}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}], "timeUnit": "-1s"}}`, false, true, nil}, + {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 30, "maxVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, +} + +func TestConfigMapParsingAndValidation(t *testing.T) { + for i, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { + var result ConfigMap + err := json.Unmarshal([]byte(tc.rawJSON), &result) + if tc.expectParseError { + require.Error(t, err) + return + } + require.NoError(t, err) + + validationErrors := result.Validate() + if tc.expectValidationError { + assert.NotEmpty(t, validationErrors) + } else { + assert.Empty(t, validationErrors) + } + if tc.customValidator != nil { + tc.customValidator(t, result) + } + }) + } +} + +//TODO: check percentage split calculations diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go index 998bca64096..9dc6c790104 100644 --- a/lib/scheduler/shared_iterations.go +++ b/lib/scheduler/shared_iterations.go @@ -1,7 +1,6 @@ package 
scheduler import ( - "encoding/json" "fmt" "time" @@ -14,7 +13,7 @@ const sharedIterationsType = "shared-iterations" func init() { RegisterConfigType(sharedIterationsType, func(name string, rawJSON []byte) (Config, error) { config := NewSharedIterationsConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -31,6 +30,8 @@ type SharedIteationsConfig struct { func NewSharedIterationsConfig(name string) SharedIteationsConfig { return SharedIteationsConfig{ BaseConfig: NewBaseConfig(name, sharedIterationsType, false), + VUs: null.NewInt(1, false), //TODO: set defaults in another place? + Iterations: null.NewInt(1, false), MaxDuration: types.NewNullDuration(1*time.Hour, false), } } @@ -41,16 +42,15 @@ var _ Config = &SharedIteationsConfig{} // Validate makes sure all options are configured and valid func (sic SharedIteationsConfig) Validate() []error { errors := sic.BaseConfig.Validate() - if !sic.VUs.Valid { - errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) - } else if sic.VUs.Int64 <= 0 { + if sic.VUs.Int64 <= 0 { errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) } - if !sic.Iterations.Valid { - errors = append(errors, fmt.Errorf("the number of iterations isn't specified")) - } else if sic.Iterations.Int64 < sic.VUs.Int64 { - errors = append(errors, fmt.Errorf("the number of iterations shouldn't be less than the number of VUs")) + if sic.Iterations.Int64 < sic.VUs.Int64 { + errors = append(errors, fmt.Errorf( + "the number of iterations (%d) shouldn't be less than the number of VUs (%d)", + sic.Iterations.Int64, sic.VUs.Int64, + )) } if time.Duration(sic.MaxDuration.Duration) < minDuration { @@ -61,3 +61,18 @@ func (sic SharedIteationsConfig) Validate() []error { return errors } + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (sic SharedIteationsConfig) GetMaxVUs() int64 { + return sic.VUs.Int64 
+} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (sic SharedIteationsConfig) GetMaxDuration() time.Duration { + maxDuration := sic.MaxDuration.Duration + if !sic.Interruptible.Bool { + maxDuration += sic.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go index 0acf90163e7..f8fb85dc607 100644 --- a/lib/scheduler/variable_arrival_rate.go +++ b/lib/scheduler/variable_arrival_rate.go @@ -1,7 +1,6 @@ package scheduler import ( - "encoding/json" "fmt" "time" @@ -14,7 +13,7 @@ const variableArrivalRateType = "variable-arrival-rate" func init() { RegisterConfigType(variableArrivalRateType, func(name string, rawJSON []byte) (Config, error) { config := NewVariableArrivalRateConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -23,9 +22,8 @@ func init() { type VariableArrivalRateConfig struct { BaseConfig StartRate null.Int `json:"startRate"` - TimeUnit types.NullDuration `json:"timeUnit"` //TODO: rename to something else? - Stages json.RawMessage `json:"stages"` //TODO: figure out some equivalent to stages? - //TODO: maybe move common parts between this and the ConstantArrivalRateConfig in another struct? + TimeUnit types.NullDuration `json:"timeUnit"` + Stages []Stage `json:"stages"` // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, // they will be dynamically allocated, until `MaxVUs` is reached, which is an @@ -39,7 +37,6 @@ func NewVariableArrivalRateConfig(name string) VariableArrivalRateConfig { return VariableArrivalRateConfig{ BaseConfig: NewBaseConfig(name, variableArrivalRateType, false), TimeUnit: types.NewNullDuration(1*time.Second, false), - //TODO: set some default values for PreAllocatedVUs and MaxVUs? 
} } @@ -58,7 +55,7 @@ func (varc VariableArrivalRateConfig) Validate() []error { errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) } - //TODO: stages valiadtion + errors = append(errors, validateStages(varc.Stages)...) if !varc.PreAllocatedVUs.Valid { errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) @@ -74,3 +71,21 @@ func (varc VariableArrivalRateConfig) Validate() []error { return errors } + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (varc VariableArrivalRateConfig) GetMaxVUs() int64 { + return varc.MaxVUs.Int64 +} + +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (varc VariableArrivalRateConfig) GetMaxDuration() time.Duration { + var maxDuration types.Duration + for _, s := range varc.Stages { + maxDuration += s.Duration.Duration + } + if !varc.Interruptible.Bool { + maxDuration += varc.IterationTimeout.Duration + } + return time.Duration(maxDuration) +} diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go index d744c2c9f3a..81451b43026 100644 --- a/lib/scheduler/variable_looping_vus.go +++ b/lib/scheduler/variable_looping_vus.go @@ -1,8 +1,8 @@ package scheduler import ( - "encoding/json" "fmt" + "time" "github.com/loadimpact/k6/lib/types" null "gopkg.in/guregu/null.v3" @@ -13,7 +13,7 @@ const variableLoopingVUsType = "variable-looping-vus" func init() { RegisterConfigType(variableLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { config := NewVariableLoopingVUsConfig(name) - err := json.Unmarshal(rawJSON, &config) + err := strictJSONUnmarshal(rawJSON, &config) return config, err }) } @@ -21,7 +21,7 @@ func init() { // Stage contains type Stage struct { Duration types.NullDuration `json:"duration"` - Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? 
+ Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? something else? } // VariableLoopingVUsConfig stores the configuration for the stages scheduler @@ -40,29 +40,35 @@ func NewVariableLoopingVUsConfig(name string) VariableLoopingVUsConfig { var _ Config = &VariableLoopingVUsConfig{} // Validate makes sure all options are configured and valid -func (ls VariableLoopingVUsConfig) Validate() []error { - errors := ls.BaseConfig.Validate() - if ls.StartVUs.Int64 < 0 { +func (vlvc VariableLoopingVUsConfig) Validate() []error { + errors := vlvc.BaseConfig.Validate() + if vlvc.StartVUs.Int64 < 0 { errors = append(errors, fmt.Errorf("the number of start VUs shouldn't be negative")) } - if len(ls.Stages) == 0 { - errors = append(errors, fmt.Errorf("at least one stage has to be specified")) - } else { - for i, s := range ls.Stages { - stageNum := i + 1 - if !s.Duration.Valid { - errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) - } else if s.Duration.Duration < 0 { - errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) - } - if !s.Target.Valid { - errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) - } else if s.Target.Int64 < 0 { - errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) - } + return append(errors, validateStages(vlvc.Stages)...) 
+} + +// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs +func (vlvc VariableLoopingVUsConfig) GetMaxVUs() int64 { + maxVUs := vlvc.StartVUs.Int64 + for _, s := range vlvc.Stages { + if s.Target.Int64 > maxVUs { + maxVUs = s.Target.Int64 } } + return maxVUs +} - return errors +// GetMaxDuration returns the maximum duration time for this scheduler, including +// the specified iterationTimeout, if the iterations are uninterruptible +func (vlvc VariableLoopingVUsConfig) GetMaxDuration() time.Duration { + var maxDuration types.Duration + for _, s := range vlvc.Stages { + maxDuration += s.Duration.Duration + } + if !vlvc.Interruptible.Bool { + maxDuration += vlvc.IterationTimeout.Duration + } + return time.Duration(maxDuration) } From ad3f6f46a88323001bc6367547ffc81a7b47278d Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 25 Feb 2019 23:40:53 +0200 Subject: [PATCH 06/36] Fix linter issues --- lib/options.go | 10 +++++----- lib/scheduler/schedulers_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/options.go b/lib/options.go index a74e00acbe9..610550e4b2a 100644 --- a/lib/options.go +++ b/lib/options.go @@ -326,15 +326,15 @@ func (o Options) Apply(opts Options) Options { // Still, if more than one of those options is simultaneously specified in the same // config tier, they will be preserved, so the validation after we've consolidated // all of the options can return an error. 
- if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || o.Execution != nil { - //TODO: uncomment this after we start using the new schedulers - /* + //TODO: uncomment this after we start using the new schedulers + /* + if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || o.Execution != nil { o.Duration = types.NewNullDuration(0, false) o.Iterations = null.NewInt(0, false) o.Stages = nil o.Execution = nil - */ - } + } + */ if opts.Duration.Valid { o.Duration = opts.Duration diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index f6aa225df90..dc42e542a13 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -73,8 +73,8 @@ var testCases = []configMapTestCase{ sched.IterationTimeout = types.NullDurationFrom(15 * time.Second) sched.StartVUs = null.IntFrom(20) sched.Stages = []Stage{ - Stage{Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * time.Second)}, - Stage{Target: null.IntFrom(10), Duration: types.NullDurationFrom(120 * time.Second)}, + {Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * time.Second)}, + {Target: null.IntFrom(10), Duration: types.NullDurationFrom(120 * time.Second)}, } require.Equal(t, cm, ConfigMap{"varloops": sched}) assert.Equal(t, int64(30), cm["varloops"].GetMaxVUs()) @@ -160,8 +160,8 @@ var testCases = []configMapTestCase{ sched := NewVariableArrivalRateConfig("varrival") sched.StartRate = null.IntFrom(10) sched.Stages = []Stage{ - Stage{Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)}, - Stage{Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)}, + {Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)}, + {Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)}, } sched.TimeUnit = types.NullDurationFrom(30 * time.Second) sched.PreAllocatedVUs = null.IntFrom(20) From 62c910c305090c2e3d6f94c3f7ea9f6166cd08d6 Mon 
Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 26 Feb 2019 08:25:20 +0200 Subject: [PATCH 07/36] Remove the debug output --- cmd/run.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 81b6b157b3e..ba63d5a1b6b 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -37,8 +37,6 @@ import ( "syscall" "time" - "github.com/davecgh/go-spew/spew" - "github.com/loadimpact/k6/api" "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" @@ -169,7 +167,6 @@ a commandline interface for interacting with it.`, } //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method - spew.Dump(conf.Execution) if errList := conf.Validate(); len(errList) != 0 { errMsg := []string{"There were problems with the specified script configuration:"} From 8113438105eef43a43dd3caf50c9627127f9b5cd Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 26 Feb 2019 14:37:05 +0200 Subject: [PATCH 08/36] Clean up and add more tests --- lib/scheduler/constant_looping_vus.go | 21 ++-------- lib/scheduler/helpers_test.go | 56 +++++++++++++++++++++++++++ lib/scheduler/interfaces.go | 2 +- lib/scheduler/schedulers_test.go | 5 ++- 4 files changed, 63 insertions(+), 21 deletions(-) create mode 100644 lib/scheduler/helpers_test.go diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index 29cc5f2afa1..a14fb21c544 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -72,21 +72,6 @@ func (lcv ConstantLoopingVUsConfig) GetMaxDuration() time.Duration { return time.Duration(maxDuration) } -// Split divides the VUS as best it can, but keeps the same duration -func (lcv ConstantLoopingVUsConfig) Split(percentages []float64) ([]Config, error) { - if err := checkPercentagesSum(percentages); err != nil { - return nil, err - } - configs := make([]Config, len(percentages)) - for i, p := range percentages { - //TODO: figure out a better approach for the 
proportional distribution - // of the VUs (which are indivisible items)... - // Some sort of "pick closest match to percentage and adjust remaining"? - configs[i] = &ConstantLoopingVUsConfig{ - BaseConfig: *lcv.BaseConfig.CopyWithPercentage(p), - VUs: null.IntFrom(int64(float64(lcv.VUs.Int64) / p)), - Duration: lcv.Duration, - } - } - return configs, nil -} +//TODO: figure out the most accurate algorithm for the proportional distribution +// of the VUs (which are indivisible items)... Some sort of "pick closest match +// to percentage and adjust remaining"? diff --git a/lib/scheduler/helpers_test.go b/lib/scheduler/helpers_test.go new file mode 100644 index 00000000000..cafb7a99e9c --- /dev/null +++ b/lib/scheduler/helpers_test.go @@ -0,0 +1,56 @@ +package scheduler + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCheckPercentagesSum(t *testing.T) { + t.Parallel() + assert.NoError(t, checkPercentagesSum([]float64{100})) + assert.NoError(t, checkPercentagesSum([]float64{50, 50})) + assert.NoError(t, checkPercentagesSum([]float64{100.0 / 3, 100.0 / 3, 100.0 / 3})) + assert.NoError(t, checkPercentagesSum([]float64{33.33, 33.33, 33.34})) + + assert.Error(t, checkPercentagesSum([]float64{})) + assert.Error(t, checkPercentagesSum([]float64{100 / 3, 100 / 3, 100 / 3})) + assert.Error(t, checkPercentagesSum([]float64{33.33, 33.33, 33.33})) + assert.Error(t, checkPercentagesSum([]float64{40, 40, 40})) +} + +func TestStrictJSONUnmarshal(t *testing.T) { + t.Parallel() + type someElement struct { + Data int `json:"data"` + Props map[string]string `json:"props"` + } + + testCases := []struct { + data string + expectedError bool + destination interface{} + expectedResult interface{} + }{ + {``, true, &someElement{}, nil}, + {`123`, true, &someElement{}, nil}, + {`"blah"`, true, &someElement{}, nil}, + {`null`, false, &someElement{}, &someElement{}}, + {`{"data": 123, "props": {"test": "mest"}}`, 
false, &someElement{}, &someElement{123, map[string]string{"test": "mest"}}}, + {`{"data": 123, "props": {"test": "mest"}}asdg`, true, &someElement{}, nil}, + } + for i, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { + err := strictJSONUnmarshal([]byte(tc.data), &tc.destination) + if tc.expectedError { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tc.expectedResult, tc.destination) + }) + } +} diff --git a/lib/scheduler/interfaces.go b/lib/scheduler/interfaces.go index 71868160df1..d1a79e61401 100644 --- a/lib/scheduler/interfaces.go +++ b/lib/scheduler/interfaces.go @@ -9,5 +9,5 @@ type Config interface { GetMaxVUs() int64 GetMaxDuration() time.Duration // includes max timeouts, to allow us to share VUs between schedulers in the future //TODO: Split(percentages []float64) ([]Config, error) - //TODO: String() or some other method that would be used for priting descriptions of the currently running schedulers for the UI? + //TODO: String() method that could be used for priting descriptions of the currently running schedulers for the UI? 
} diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index dc42e542a13..12259c64c86 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -19,7 +19,7 @@ type configMapTestCase struct { customValidator func(t *testing.T, cm ConfigMap) } -var testCases = []configMapTestCase{ +var configMapTestCases = []configMapTestCase{ {"", true, false, nil}, {"1234", true, false, nil}, {"asdf", true, false, nil}, @@ -183,7 +183,8 @@ var testCases = []configMapTestCase{ } func TestConfigMapParsingAndValidation(t *testing.T) { - for i, tc := range testCases { + t.Parallel() + for i, tc := range configMapTestCases { tc := tc t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { var result ConfigMap From ce1c9677ff7e69f7e9fb14dbf33df5fbb0407642 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 26 Feb 2019 15:02:30 +0200 Subject: [PATCH 09/36] Restore the Split() method --- lib/scheduler/constant_looping_vus.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index a14fb21c544..29cc5f2afa1 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -72,6 +72,21 @@ func (lcv ConstantLoopingVUsConfig) GetMaxDuration() time.Duration { return time.Duration(maxDuration) } -//TODO: figure out the most accurate algorithm for the proportional distribution -// of the VUs (which are indivisible items)... Some sort of "pick closest match -// to percentage and adjust remaining"? 
+// Split divides the VUS as best it can, but keeps the same duration +func (lcv ConstantLoopingVUsConfig) Split(percentages []float64) ([]Config, error) { + if err := checkPercentagesSum(percentages); err != nil { + return nil, err + } + configs := make([]Config, len(percentages)) + for i, p := range percentages { + //TODO: figure out a better approach for the proportional distribution + // of the VUs (which are indivisible items)... + // Some sort of "pick closest match to percentage and adjust remaining"? + configs[i] = &ConstantLoopingVUsConfig{ + BaseConfig: *lcv.BaseConfig.CopyWithPercentage(p), + VUs: null.IntFrom(int64(float64(lcv.VUs.Int64) / p)), + Duration: lcv.Duration, + } + } + return configs, nil +} From 2451ead1f16984116fbff6782d4c070fae0faef7 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 28 Feb 2019 13:36:56 +0200 Subject: [PATCH 10/36] Add copyright notices --- lib/scheduler/base_config.go | 20 ++++++++++++++++++++ lib/scheduler/configmap.go | 20 ++++++++++++++++++++ lib/scheduler/constant_arrival_rate.go | 20 ++++++++++++++++++++ lib/scheduler/constant_looping_vus.go | 20 ++++++++++++++++++++ lib/scheduler/helpers.go | 20 ++++++++++++++++++++ lib/scheduler/helpers_test.go | 20 ++++++++++++++++++++ lib/scheduler/interfaces.go | 20 ++++++++++++++++++++ lib/scheduler/per_vu_iterations.go | 20 ++++++++++++++++++++ lib/scheduler/schedulers_test.go | 20 ++++++++++++++++++++ lib/scheduler/shared_iterations.go | 20 ++++++++++++++++++++ lib/scheduler/variable_arrival_rate.go | 20 ++++++++++++++++++++ lib/scheduler/variable_looping_vus.go | 20 ++++++++++++++++++++ 12 files changed, 240 insertions(+) diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go index 36253cd926c..efba8cb1c3a 100644 --- a/lib/scheduler/base_config.go +++ b/lib/scheduler/base_config.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it 
and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go index 1f71b16f1c5..2445e6a1931 100644 --- a/lib/scheduler/configmap.go +++ b/lib/scheduler/configmap.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package scheduler import ( diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go index 21dc3db7184..0234c3a3465 100644 --- a/lib/scheduler/constant_arrival_rate.go +++ b/lib/scheduler/constant_arrival_rate.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index 29cc5f2afa1..dd67e9cdf17 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/helpers.go b/lib/scheduler/helpers.go index e85d622a9c5..31bf37bf22a 100644 --- a/lib/scheduler/helpers.go +++ b/lib/scheduler/helpers.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/helpers_test.go b/lib/scheduler/helpers_test.go index cafb7a99e9c..6c07869b6e4 100644 --- a/lib/scheduler/helpers_test.go +++ b/lib/scheduler/helpers_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/interfaces.go b/lib/scheduler/interfaces.go index d1a79e61401..764df42ae85 100644 --- a/lib/scheduler/interfaces.go +++ b/lib/scheduler/interfaces.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import "time" diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go index e744b09b93c..92d98e3428a 100644 --- a/lib/scheduler/per_vu_iterations.go +++ b/lib/scheduler/per_vu_iterations.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index 12259c64c86..c5defae46b5 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go index 9dc6c790104..c09868a50fe 100644 --- a/lib/scheduler/shared_iterations.go +++ b/lib/scheduler/shared_iterations.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go index f8fb85dc607..76ba7a51dcb 100644 --- a/lib/scheduler/variable_arrival_rate.go +++ b/lib/scheduler/variable_arrival_rate.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go index 81451b43026..f1ab3de1769 100644 --- a/lib/scheduler/variable_looping_vus.go +++ b/lib/scheduler/variable_looping_vus.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package scheduler import ( From 507c5c5142639742b30e1d1ac5e492b21f0270d5 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Fri, 1 Mar 2019 12:32:37 +0200 Subject: [PATCH 11/36] Improve the execution config generation from shortcuts With this, script runs without any execution params will still have a 1 iteration in 1 VU execution plan, but it will be the per-VU scheduler, so it could eventually be executed both locally and in the cloud. --- cmd/config.go | 35 ++++++++++++++++++++---------- lib/options.go | 2 +- lib/scheduler/per_vu_iterations.go | 10 ++++----- lib/scheduler/shared_iterations.go | 2 +- 4 files changed, 29 insertions(+), 20 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 025382ac6d4..fa746fe1f0b 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -196,13 +196,20 @@ func buildExecutionConfig(conf Config) (Config, error) { } if conf.Execution != nil { + //TODO: use a custom error type return result, errors.New("specifying both duration and execution is not supported") } - ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) - ds.VUs = conf.VUs - ds.Duration = conf.Duration - result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + if conf.Duration.Duration <= 0 { + //TODO: make this an error in the next version + log.Warnf("Specifying infinite duration in this way is deprecated and won't be supported in the future k6 versions") + } else { + ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) + ds.VUs = conf.VUs + ds.Duration = conf.Duration + ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility + result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } } else if conf.Stages != nil { if conf.Iterations.Valid { //TODO: make this an error in the next 
version @@ -220,21 +227,25 @@ func buildExecutionConfig(conf Config) (Config, error) { ds.Stages = append(ds.Stages, scheduler.Stage{Duration: s.Duration, Target: s.Target}) } } + ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} - } else if conf.Iterations.Valid || conf.Execution == nil { - // Either shared iterations were explicitly specified via the shortcut option, or no execution - // parameters were specified in any way, which will run the default 1 iteration in 1 VU - if conf.Iterations.Valid && conf.Execution != nil { + } else if conf.Iterations.Valid { + if conf.Execution != nil { return conf, errors.New("specifying both iterations and execution is not supported") } + // TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations? ds := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName) ds.VUs = conf.VUs - if conf.Iterations.Valid { // TODO: fix where the default iterations value is set... sigh... - ds.Iterations = conf.Iterations - } - + ds.Iterations = conf.Iterations result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} + } else if conf.Execution == nil { + // If no execution parameters were specified, we'll create a per-VU iterations config + // with 1 VU and 1 iteration. We're choosing the per-VU config, since that one could also + // be executed both locally, and in the cloud. 
+ result.Execution = scheduler.ConfigMap{ + lib.DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(lib.DefaultSchedulerName), + } } //TODO: validate the config; questions: diff --git a/lib/options.go b/lib/options.go index 610550e4b2a..67c09ed9cd2 100644 --- a/lib/options.go +++ b/lib/options.go @@ -218,7 +218,7 @@ type Options struct { Iterations null.Int `json:"iterations" envconfig:"iterations"` Stages []Stage `json:"stages" envconfig:"stages"` - Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"execution"` + Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"-"` // Timeouts for the setup() and teardown() functions SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"setup_timeout"` diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go index 92d98e3428a..25501411ea4 100644 --- a/lib/scheduler/per_vu_iterations.go +++ b/lib/scheduler/per_vu_iterations.go @@ -50,6 +50,8 @@ type PerVUIteationsConfig struct { func NewPerVUIterationsConfig(name string) PerVUIteationsConfig { return PerVUIteationsConfig{ BaseConfig: NewBaseConfig(name, perVUIterationsType, false), + VUs: null.NewInt(1, false), + Iterations: null.NewInt(1, false), MaxDuration: types.NewNullDuration(1*time.Hour, false), } } @@ -60,15 +62,11 @@ var _ Config = &PerVUIteationsConfig{} // Validate makes sure all options are configured and valid func (pvic PerVUIteationsConfig) Validate() []error { errors := pvic.BaseConfig.Validate() - if !pvic.VUs.Valid { - errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) - } else if pvic.VUs.Int64 <= 0 { + if pvic.VUs.Int64 <= 0 { errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) } - if !pvic.Iterations.Valid { - errors = append(errors, fmt.Errorf("the number of iterations isn't specified")) - } else if pvic.Iterations.Int64 <= 0 { + if pvic.Iterations.Int64 <= 0 { errors = append(errors, fmt.Errorf("the number of iterations should be 
more than 0")) } diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go index c09868a50fe..d6ef2e2bb92 100644 --- a/lib/scheduler/shared_iterations.go +++ b/lib/scheduler/shared_iterations.go @@ -50,7 +50,7 @@ type SharedIteationsConfig struct { func NewSharedIterationsConfig(name string) SharedIteationsConfig { return SharedIteationsConfig{ BaseConfig: NewBaseConfig(name, sharedIterationsType, false), - VUs: null.NewInt(1, false), //TODO: set defaults in another place? + VUs: null.NewInt(1, false), Iterations: null.NewInt(1, false), MaxDuration: types.NewNullDuration(1*time.Hour, false), } From 54eb41270d130be7f269c575739f38cdb8f3c5cf Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Fri, 1 Mar 2019 19:01:57 +0200 Subject: [PATCH 12/36] Warn if the user specifies only the new execution option --- cmd/config.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index fa746fe1f0b..21ae22f6fd2 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -239,8 +239,11 @@ func buildExecutionConfig(conf Config) (Config, error) { ds.VUs = conf.VUs ds.Iterations = conf.Iterations result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} - } else if conf.Execution == nil { - // If no execution parameters were specified, we'll create a per-VU iterations config + } else if conf.Execution != nil { + //TODO: remove this warning in the next version + log.Warnf("The execution settings are not functional in this k6 release, they will be ignored") + } else { + // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config // with 1 VU and 1 iteration. We're choosing the per-VU config, since that one could also // be executed both locally, and in the cloud. 
result.Execution = scheduler.ConfigMap{ From 9cab261ba0dab478f38b4ffa8816da18f117f03b Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Fri, 1 Mar 2019 19:17:51 +0200 Subject: [PATCH 13/36] Improve the scheduler config tests --- lib/scheduler/schedulers_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index c5defae46b5..4f64394069e 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -121,6 +121,9 @@ var configMapTestCases = []configMapTestCase{ assert.Equal(t, 3630*time.Second, cm["ishared"].GetMaxDuration()) assert.Empty(t, cm["ishared"].Validate()) }}, + {`{"ishared": {"type": "shared-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values + {`{"ishared": {"type": "shared-iterations", "iterations": 20}}`, false, false, nil}, + {`{"ishared": {"type": "shared-iterations", "vus": 10}}`, false, true, nil}, // error because VUs are more than iters {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "30m"}}`, false, false, nil}, {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, @@ -139,9 +142,10 @@ var configMapTestCases = []configMapTestCase{ assert.Equal(t, 3630*time.Second, cm["ipervu"].GetMaxDuration()) assert.Empty(t, cm["ipervu"].Validate()) }}, + {`{"ipervu": {"type": "per-vu-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values + {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20}}`, false, false, nil}, + {`{"ipervu": {"type": "per-vu-iterations", "vus": 10}}`, false, false, nil}, {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, false, false, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20}}`, false, true, nil}, - 
{`{"ipervu": {"type": "per-vu-iterations", "vus": 10}}`, false, true, nil}, {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, @@ -207,6 +211,7 @@ func TestConfigMapParsingAndValidation(t *testing.T) { for i, tc := range configMapTestCases { tc := tc t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { + t.Log(tc.rawJSON) var result ConfigMap err := json.Unmarshal([]byte(tc.rawJSON), &result) if tc.expectParseError { From 977b10e76345905c9e2ea5da9385a2d9b20ecce6 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Fri, 1 Mar 2019 19:03:50 +0200 Subject: [PATCH 14/36] Refactor some CLI configs and add some basic configuration tests This is woefully insufficient, but even that was painful to write... Further refactoring and tests will follow, especially relating to JSON/JS configs and mixing configuration between the CLI/env-vars/JS/JSON layers. The effort will hopefully be worth it, since we should be able to reuse these tests to verify that we're not breaking things when we finally tackle https://github.com/loadimpact/k6/issues/883 properly. 
--- cmd/archive.go | 17 +++- cmd/cloud.go | 15 ++- cmd/config.go | 1 + cmd/config_test.go | 242 +++++++++++++++++++++++++++++++++++++++++++-- cmd/root.go | 3 +- cmd/run.go | 22 +++-- 6 files changed, 279 insertions(+), 21 deletions(-) diff --git a/cmd/archive.go b/cmd/archive.go index ede0553929c..8263715870f 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var archiveOut = "archive.tar" @@ -90,11 +91,19 @@ An archive is a fully self-contained test run, and can be executed identically e }, } +func archiveCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(false)) + flags.AddFlagSet(configFileFlagSet()) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + flags.StringVarP(&archiveOut, "archive-out", "O", archiveOut, "archive output filename") + return flags +} + func init() { RootCmd.AddCommand(archiveCmd) archiveCmd.Flags().SortFlags = false - archiveCmd.Flags().AddFlagSet(optionFlagSet()) - archiveCmd.Flags().AddFlagSet(runtimeOptionFlagSet(false)) - archiveCmd.Flags().AddFlagSet(configFileFlagSet()) - archiveCmd.Flags().StringVarP(&archiveOut, "archive-out", "O", archiveOut, "archive output filename") + archiveCmd.Flags().AddFlagSet(archiveCmdFlagSet()) } diff --git a/cmd/cloud.go b/cmd/cloud.go index 1850f27d93e..27f3c01c6fc 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -36,6 +36,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" log "github.com/sirupsen/logrus" ) @@ -235,10 +236,18 @@ This will execute the test on the Load Impact cloud service. 
Use "k6 login cloud }, } +func cloudCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(false)) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + flags.BoolVar(&exitOnRunning, "exit-on-running", exitOnRunning, "exits when test reaches the running status") + return flags +} + func init() { RootCmd.AddCommand(cloudCmd) cloudCmd.Flags().SortFlags = false - cloudCmd.Flags().AddFlagSet(optionFlagSet()) - cloudCmd.Flags().AddFlagSet(runtimeOptionFlagSet(false)) - cloudCmd.Flags().BoolVar(&exitOnRunning, "exit-on-running", exitOnRunning, "exits when test reaches the running status") + cloudCmd.Flags().AddFlagSet(cloudCmdFlagSet()) } diff --git a/cmd/config.go b/cmd/config.go index 21ae22f6fd2..7d92536f2dd 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -48,6 +48,7 @@ var configFile = os.Getenv("K6_CONFIG") // overridden by `-c` flag! // configFileFlagSet returns a FlagSet that contains flags needed for specifying a config file. func configFileFlagSet() *pflag.FlagSet { + //TODO: remove? 
flags := pflag.NewFlagSet("", 0) flags.StringVarP(&configFile, "config", "c", configFile, "specify config file to read") return flags diff --git a/cmd/config_test.go b/cmd/config_test.go index a82daed850f..b0db5ebde60 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -1,7 +1,7 @@ /* * * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact + * Copyright (C) 2019 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as @@ -21,11 +21,25 @@ package cmd import ( + "fmt" + "io/ioutil" "os" + "strings" + "sync" "testing" + "time" + + "github.com/loadimpact/k6/lib/scheduler" + "github.com/loadimpact/k6/lib/types" + + "github.com/spf13/afero" + "github.com/spf13/pflag" "github.com/kelseyhightower/envconfig" + "github.com/loadimpact/k6/lib" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" ) @@ -83,8 +97,227 @@ func TestConfigCmd(t *testing.T) { } } -//TODO: write end-to-end configuration tests - how the config file, in-script options, environment variables and CLI flags are parsed and interact... 
and how the end result is then propagated back into the script +// A simple logrus hook that could be used to check if log messages were outputted +type simpleLogrusHook struct { + mutex sync.Mutex + levels []log.Level + messageCache []log.Entry +} + +func (smh *simpleLogrusHook) Levels() []log.Level { + return smh.levels +} +func (smh *simpleLogrusHook) Fire(e *log.Entry) error { + smh.mutex.Lock() + defer smh.mutex.Unlock() + smh.messageCache = append(smh.messageCache, *e) + return nil +} + +func (smh *simpleLogrusHook) drain() []log.Entry { + smh.mutex.Lock() + defer smh.mutex.Unlock() + res := smh.messageCache + smh.messageCache = []log.Entry{} + return res +} + +var _ log.Hook = &simpleLogrusHook{} + +// A helper funcion for setting arbitrary environment variables and +// restoring the old ones at the end, usually by deferring the returned callback +//TODO: remove these hacks when we improve the configuration... we shouldn't +// have to mess with the global environment at all... +func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { + actuallSetEnv := func(env []string) { + os.Clearenv() + for _, e := range env { + val := "" + pair := strings.SplitN(e, "=", 2) + if len(pair) > 1 { + val = pair[1] + } + require.NoError(t, os.Setenv(pair[0], val)) + } + } + oldEnv := os.Environ() + actuallSetEnv(newEnv) + + return func() { + actuallSetEnv(oldEnv) + } +} + +var verifyOneIterPerOneVU = func(t *testing.T, c Config) { + // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.PerVUIteationsConfig{}, sched) + perVuIters, ok := sched.(scheduler.PerVUIteationsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) + assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) + assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) +} + +var verifySharedIters = func(vus, 
iters int64) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.SharedIteationsConfig{}, sched) + sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) + require.True(t, ok) + assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) + assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) + } +} + +var verifyConstantLoopingVUs = func(vus int64, duration time.Duration) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched) + clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) + assert.Equal(t, null.NewInt(vus, true), clvc.VUs) + assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) + } +} + +func mostFlagSets() []*pflag.FlagSet { + // sigh... compromises... + return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} +} + +// exp contains the different events or errors we expect our test case to trigger. +// for space and clarity, we use the fact that by default, all of the struct values are false +type exp struct { + cliError bool + consolidationError bool + validationErrors bool + logWarning bool //TODO: remove in the next version? +} + +// A hell of a complicated test case, that still doesn't test things fully... +type configConsolidationTestCase struct { + cliFlagSets []*pflag.FlagSet + cliFlagValues []string + env []string + runnerOptions *lib.Options + //TODO: test the JSON config as well... 
after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed + expected exp + customValidator func(t *testing.T, c Config) +} + +var configConsolidationTestCases = []configConsolidationTestCase{ + // Check that no options will result in 1 VU 1 iter value for execution + {mostFlagSets(), nil, nil, nil, exp{}, verifyOneIterPerOneVU}, + // Verify some CLI errors + {mostFlagSets(), []string{"--blah", "blah"}, nil, nil, exp{cliError: true}, nil}, + {mostFlagSets(), []string{"--duration", "blah"}, nil, nil, exp{cliError: true}, nil}, + {mostFlagSets(), []string{"--iterations", "blah"}, nil, nil, exp{cliError: true}, nil}, + {mostFlagSets(), []string{"--execution", ""}, nil, nil, exp{cliError: true}, nil}, + {mostFlagSets(), []string{"--stage", "10:20s"}, nil, nil, exp{cliError: true}, nil}, + // Check if CLI shortcuts generate correct execution values + {mostFlagSets(), []string{"--vus", "1", "--iterations", "5"}, nil, nil, exp{}, verifySharedIters(1, 5)}, + {mostFlagSets(), []string{"-u", "2", "-i", "6"}, nil, nil, exp{}, verifySharedIters(2, 6)}, + {mostFlagSets(), []string{"-u", "3", "-d", "30s"}, nil, nil, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, + {mostFlagSets(), []string{"-u", "4", "--duration", "60s"}, nil, nil, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, + //TODO: verify stages + // This should get a validation error since VUs are more than the shared iterations + {mostFlagSets(), []string{"--vus", "10", "-i", "6"}, nil, nil, exp{validationErrors: true}, verifySharedIters(10, 6)}, + // These should emit a warning + {mostFlagSets(), []string{"-u", "1", "-i", "6", "-d", "10s"}, nil, nil, exp{logWarning: true}, nil}, + {mostFlagSets(), []string{"-u", "2", "-d", "10s", "-s", "10s:20"}, nil, nil, exp{logWarning: true}, nil}, + {mostFlagSets(), []string{"-u", "3", "-i", "5", "-s", "10s:20"}, nil, nil, exp{logWarning: true}, nil}, + {mostFlagSets(), []string{"-u", "3", "-d", "0"}, nil, nil, exp{logWarning: true}, nil}, + 
// Test if environment variable shortcuts are working as expected + {mostFlagSets(), nil, []string{"K6_VUS=5", "K6_ITERATIONS=15"}, nil, exp{}, verifySharedIters(5, 15)}, + {mostFlagSets(), nil, []string{"K6_VUS=10", "K6_DURATION=20s"}, nil, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, + + //TODO: test combinations between options and levels + //TODO: test the future full overwriting of the duration/iterations/stages/execution options + + // Just in case, verify that no options will result in the same 1 vu 1 iter config + {mostFlagSets(), nil, nil, nil, exp{}, verifyOneIterPerOneVU}, + //TODO: test for differences between flagsets + //TODO: more tests in general... +} + +func TestConfigConsolidation(t *testing.T) { + logHook := simpleLogrusHook{levels: []log.Level{log.WarnLevel}} + log.AddHook(&logHook) + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + runTestCase := func(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet) { + logHook.drain() + + restoreEnv := setEnv(t, testCase.env) + defer restoreEnv() + + //TODO: also remove these hacks when we improve the configuration... + getTestCaseCliConf := func() (Config, error) { + if err := flagSet.Parse(testCase.cliFlagValues); err != nil { + return Config{}, err + } + if flagSet.Lookup("out") != nil { + return getConfig(flagSet) + } + opts, errOpts := getOptions(flagSet) + return Config{Options: opts}, errOpts + } + + cliConf, err := getTestCaseCliConf() + if testCase.expected.cliError { + require.Error(t, err) + return + } + require.NoError(t, err) + + var runner lib.Runner + if testCase.runnerOptions != nil { + runner = &lib.MiniRunner{Options: *testCase.runnerOptions} + } + fs := afero.NewMemMapFs() //TODO: test JSON configs as well! 
+ result, err := getConsolidatedConfig(fs, cliConf, runner) + if testCase.expected.consolidationError { + require.Error(t, err) + return + } + require.NoError(t, err) + warnings := logHook.drain() + if testCase.expected.logWarning { + assert.NotEmpty(t, warnings) + } else { + assert.Empty(t, warnings) + } + + validationErrors := result.Validate() + if testCase.expected.validationErrors { + assert.NotEmpty(t, validationErrors) + } else { + assert.Empty(t, validationErrors) + } + + if testCase.customValidator != nil { + testCase.customValidator(t, result) + } + } + + for tcNum, testCase := range configConsolidationTestCases { + for fsNum, flagSet := range testCase.cliFlagSets { + // I want to paralelize this, but I cannot... due to global variables and other + // questionable architectural choices... :| + t.Run( + fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum), + func(t *testing.T) { runTestCase(t, testCase, flagSet) }, + ) + } + } +} func TestConfigEnv(t *testing.T) { testdata := map[struct{ Name, Key string }]map[string]func(Config){ {"Linger", "K6_LINGER"}: { @@ -134,8 +367,3 @@ func TestConfigApply(t *testing.T) { assert.Equal(t, []string{"influxdb", "json"}, conf.Out) }) } - -func TestBuildExecutionConfig(t *testing.T) { - //TODO: test the current config building and constructing of the execution plan, and the emitted warnings - //TODO: test the future full overwriting of the duration/iterations/stages/execution options -} diff --git a/cmd/root.go b/cmd/root.go index 521ba5c4be3..715e2cb2f78 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -100,12 +100,13 @@ func init() { defaultConfigPathMsg = fmt.Sprintf(" (default %s)", filepath.Join(configFolders[0].Path, configFilename)) } + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... 
:/ RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "enable debug logging") RootCmd.PersistentFlags().BoolVarP(&quiet, "quiet", "q", false, "disable progress updates") RootCmd.PersistentFlags().BoolVar(&noColor, "no-color", false, "disable colored output") RootCmd.PersistentFlags().StringVar(&logFmt, "logformat", "", "log output format") RootCmd.PersistentFlags().StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") - RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file"+defaultConfigPathMsg) + RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file"+defaultConfigPathMsg) //TODO: figure out and fix? must(cobra.MarkFlagFilename(RootCmd.PersistentFlags(), "config")) } diff --git a/cmd/run.go b/cmd/run.go index ba63d5a1b6b..6e1c1612906 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -49,6 +49,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/cobra" + "github.com/spf13/pflag" null "gopkg.in/guregu/null.v3" ) @@ -65,6 +66,7 @@ const ( ) var ( + //TODO: fix this, global variables are not very testable... runType = os.Getenv("K6_TYPE") runNoSetup = os.Getenv("K6_NO_SETUP") != "" runNoTeardown = os.Getenv("K6_NO_TEARDOWN") != "" @@ -472,16 +474,24 @@ a commandline interface for interacting with it.`, }, } +func runCmdFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SortFlags = false + flags.AddFlagSet(optionFlagSet()) + flags.AddFlagSet(runtimeOptionFlagSet(true)) + flags.AddFlagSet(configFlagSet()) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... 
:/ + flags.StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") + flags.BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") + flags.BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") + return flags +} + func init() { RootCmd.AddCommand(runCmd) runCmd.Flags().SortFlags = false - runCmd.Flags().AddFlagSet(optionFlagSet()) - runCmd.Flags().AddFlagSet(runtimeOptionFlagSet(true)) - runCmd.Flags().AddFlagSet(configFlagSet()) - runCmd.Flags().StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") - runCmd.Flags().BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") - runCmd.Flags().BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") + runCmd.Flags().AddFlagSet(runCmdFlagSet()) } // Reads a source file from any supported destination. From d1943ecc5ae8ea9da7236f4c8580722e7ececccb Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sat, 2 Mar 2019 00:49:54 +0200 Subject: [PATCH 15/36] Improve the readability of the config test cases --- cmd/config_test.go | 68 +++++++++++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/cmd/config_test.go b/cmd/config_test.go index b0db5ebde60..85b72395f39 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -158,6 +158,7 @@ var verifyOneIterPerOneVU = func(t *testing.T, c Config) { assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) + //TODO: verify shortcut options as well? 
} var verifySharedIters = func(vus, iters int64) func(t *testing.T, c Config) { @@ -169,6 +170,7 @@ var verifySharedIters = func(vus, iters int64) func(t *testing.T, c Config) { require.True(t, ok) assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) + //TODO: verify shortcut options as well? } } @@ -182,6 +184,7 @@ var verifyConstantLoopingVUs = func(vus int64, duration time.Duration) func(t *t assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) assert.Equal(t, null.NewInt(vus, true), clvc.VUs) assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) + //TODO: verify shortcut options as well? } } @@ -190,6 +193,14 @@ func mostFlagSets() []*pflag.FlagSet { return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} } +type opts struct { + cliFlagSets []*pflag.FlagSet + cli []string + env []string + runner *lib.Options + //TODO: test the JSON config as well... after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed +} + // exp contains the different events or errors we expect our test case to trigger. // for space and clarity, we use the fact that by default, all of the struct values are false type exp struct { @@ -201,46 +212,42 @@ type exp struct { // A hell of a complicated test case, that still doesn't test things fully... type configConsolidationTestCase struct { - cliFlagSets []*pflag.FlagSet - cliFlagValues []string - env []string - runnerOptions *lib.Options - //TODO: test the JSON config as well... 
after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed + options opts expected exp customValidator func(t *testing.T, c Config) } var configConsolidationTestCases = []configConsolidationTestCase{ // Check that no options will result in 1 VU 1 iter value for execution - {mostFlagSets(), nil, nil, nil, exp{}, verifyOneIterPerOneVU}, + {opts{}, exp{}, verifyOneIterPerOneVU}, // Verify some CLI errors - {mostFlagSets(), []string{"--blah", "blah"}, nil, nil, exp{cliError: true}, nil}, - {mostFlagSets(), []string{"--duration", "blah"}, nil, nil, exp{cliError: true}, nil}, - {mostFlagSets(), []string{"--iterations", "blah"}, nil, nil, exp{cliError: true}, nil}, - {mostFlagSets(), []string{"--execution", ""}, nil, nil, exp{cliError: true}, nil}, - {mostFlagSets(), []string{"--stage", "10:20s"}, nil, nil, exp{cliError: true}, nil}, + {opts{cli: []string{"--blah", "blah"}}, exp{cliError: true}, nil}, + {opts{cli: []string{"--duration", "blah"}}, exp{cliError: true}, nil}, + {opts{cli: []string{"--iterations", "blah"}}, exp{cliError: true}, nil}, + {opts{cli: []string{"--execution", ""}}, exp{cliError: true}, nil}, + {opts{cli: []string{"--stage", "10:20s"}}, exp{cliError: true}, nil}, // Check if CLI shortcuts generate correct execution values - {mostFlagSets(), []string{"--vus", "1", "--iterations", "5"}, nil, nil, exp{}, verifySharedIters(1, 5)}, - {mostFlagSets(), []string{"-u", "2", "-i", "6"}, nil, nil, exp{}, verifySharedIters(2, 6)}, - {mostFlagSets(), []string{"-u", "3", "-d", "30s"}, nil, nil, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, - {mostFlagSets(), []string{"-u", "4", "--duration", "60s"}, nil, nil, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, + {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, + {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 
30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, //TODO: verify stages // This should get a validation error since VUs are more than the shared iterations - {mostFlagSets(), []string{"--vus", "10", "-i", "6"}, nil, nil, exp{validationErrors: true}, verifySharedIters(10, 6)}, + {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, // These should emit a warning - {mostFlagSets(), []string{"-u", "1", "-i", "6", "-d", "10s"}, nil, nil, exp{logWarning: true}, nil}, - {mostFlagSets(), []string{"-u", "2", "-d", "10s", "-s", "10s:20"}, nil, nil, exp{logWarning: true}, nil}, - {mostFlagSets(), []string{"-u", "3", "-i", "5", "-s", "10s:20"}, nil, nil, exp{logWarning: true}, nil}, - {mostFlagSets(), []string{"-u", "3", "-d", "0"}, nil, nil, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, // Test if environment variable shortcuts are working as expected - {mostFlagSets(), nil, []string{"K6_VUS=5", "K6_ITERATIONS=15"}, nil, exp{}, verifySharedIters(5, 15)}, - {mostFlagSets(), nil, []string{"K6_VUS=10", "K6_DURATION=20s"}, nil, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, + {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, //TODO: test combinations between options and levels //TODO: test the future full overwriting of the duration/iterations/stages/execution options // Just in case, verify that no options will result in the same 1 vu 1 iter config - {mostFlagSets(), nil, nil, 
nil, exp{}, verifyOneIterPerOneVU}, + {opts{}, exp{}, verifyOneIterPerOneVU}, //TODO: test for differences between flagsets //TODO: more tests in general... } @@ -252,14 +259,15 @@ func TestConfigConsolidation(t *testing.T) { defer log.SetOutput(os.Stderr) runTestCase := func(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet) { + t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) logHook.drain() - restoreEnv := setEnv(t, testCase.env) + restoreEnv := setEnv(t, testCase.options.env) defer restoreEnv() //TODO: also remove these hacks when we improve the configuration... getTestCaseCliConf := func() (Config, error) { - if err := flagSet.Parse(testCase.cliFlagValues); err != nil { + if err := flagSet.Parse(testCase.options.cli); err != nil { return Config{}, err } if flagSet.Lookup("out") != nil { @@ -277,8 +285,8 @@ func TestConfigConsolidation(t *testing.T) { require.NoError(t, err) var runner lib.Runner - if testCase.runnerOptions != nil { - runner = &lib.MiniRunner{Options: *testCase.runnerOptions} + if testCase.options.runner != nil { + runner = &lib.MiniRunner{Options: *testCase.options.runner} } fs := afero.NewMemMapFs() //TODO: test JSON configs as well! result, err := getConsolidatedConfig(fs, cliConf, runner) @@ -308,7 +316,11 @@ func TestConfigConsolidation(t *testing.T) { } for tcNum, testCase := range configConsolidationTestCases { - for fsNum, flagSet := range testCase.cliFlagSets { + flagSets := testCase.options.cliFlagSets + if flagSets == nil { // handle the most common case + flagSets = mostFlagSets() + } + for fsNum, flagSet := range flagSets { // I want to paralelize this, but I cannot... due to global variables and other // questionable architectural choices... 
:| t.Run( From effbf30641634ca223c562b561b78b90bbea16dd Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 5 Mar 2019 14:29:40 +0200 Subject: [PATCH 16/36] Work arround some strage appveyor environment variable issues For some reason, when running the tests on appveyor, the `os.Environ()` result contains the value `=C:=C:\gopath\src\github.com\loadimpact\k6` :/ ... --- cmd/config_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmd/config_test.go b/cmd/config_test.go index 85b72395f39..bf8b66b5965 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -129,7 +129,7 @@ var _ log.Hook = &simpleLogrusHook{} //TODO: remove these hacks when we improve the configuration... we shouldn't // have to mess with the global environment at all... func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { - actuallSetEnv := func(env []string) { + actuallSetEnv := func(env []string, abortOnSetErr bool) { os.Clearenv() for _, e := range env { val := "" @@ -137,14 +137,22 @@ func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { if len(pair) > 1 { val = pair[1] } - require.NoError(t, os.Setenv(pair[0], val)) + err := os.Setenv(pair[0], val) + if abortOnSetErr { + require.NoError(t, err) + } else if err != nil { + t.Logf( + "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", + err, pair[0], val, + ) + } } } oldEnv := os.Environ() - actuallSetEnv(newEnv) + actuallSetEnv(newEnv, true) return func() { - actuallSetEnv(oldEnv) + actuallSetEnv(oldEnv, false) } } From 2120a124b13703314b2a1cf60b573620f8dfd831 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 5 Mar 2019 19:09:11 +0200 Subject: [PATCH 17/36] Fix minor issues with the structure of the new config tests --- cmd/common.go | 2 + cmd/config_test.go | 88 ++++++++++++++---------------------- lib/testutils/logrus_hook.go | 39 ++++++++++++++++ 3 files changed, 75 insertions(+), 54 deletions(-) create mode 100644 
lib/testutils/logrus_hook.go diff --git a/cmd/common.go b/cmd/common.go index 0de51ed3038..d3964bce45f 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -68,6 +68,8 @@ func (w consoleWriter) Write(p []byte) (n int, err error) { return } +//TODO: refactor the CLI config so these functions aren't needed - they +// can mask errors by failing only at runtime, not at compile time func getNullBool(flags *pflag.FlagSet, key string) null.Bool { v, err := flags.GetBool(key) if err != nil { diff --git a/cmd/config_test.go b/cmd/config_test.go index bf8b66b5965..3cc01b1fd7d 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -25,11 +25,11 @@ import ( "io/ioutil" "os" "strings" - "sync" "testing" "time" "github.com/loadimpact/k6/lib/scheduler" + "github.com/loadimpact/k6/lib/testutils" "github.com/loadimpact/k6/lib/types" "github.com/spf13/afero" @@ -97,33 +97,6 @@ func TestConfigCmd(t *testing.T) { } } -// A simple logrus hook that could be used to check if log messages were outputted -type simpleLogrusHook struct { - mutex sync.Mutex - levels []log.Level - messageCache []log.Entry -} - -func (smh *simpleLogrusHook) Levels() []log.Level { - return smh.levels -} -func (smh *simpleLogrusHook) Fire(e *log.Entry) error { - smh.mutex.Lock() - defer smh.mutex.Unlock() - smh.messageCache = append(smh.messageCache, *e) - return nil -} - -func (smh *simpleLogrusHook) drain() []log.Entry { - smh.mutex.Lock() - defer smh.mutex.Unlock() - res := smh.messageCache - smh.messageCache = []log.Entry{} - return res -} - -var _ log.Hook = &simpleLogrusHook{} - // A helper funcion for setting arbitrary environment variables and // restoring the old ones at the end, usually by deferring the returned callback //TODO: remove these hacks when we improve the configuration... 
we shouldn't @@ -156,7 +129,7 @@ func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { } } -var verifyOneIterPerOneVU = func(t *testing.T, c Config) { +func verifyOneIterPerOneVU(t *testing.T, c Config) { // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -169,7 +142,7 @@ var verifyOneIterPerOneVU = func(t *testing.T, c Config) { //TODO: verify shortcut options as well? } -var verifySharedIters = func(vus, iters int64) func(t *testing.T, c Config) { +func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -182,7 +155,7 @@ var verifySharedIters = func(vus, iters int64) func(t *testing.T, c Config) { } } -var verifyConstantLoopingVUs = func(vus int64, duration time.Duration) func(t *testing.T, c Config) { +func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -197,6 +170,8 @@ var verifyConstantLoopingVUs = func(vus int64, duration time.Duration) func(t *t } func mostFlagSets() []*pflag.FlagSet { + //TODO: make this unneccesary... currently these are the only commands in which + // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ // sigh... compromises... return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} } @@ -212,7 +187,8 @@ type opts struct { // exp contains the different events or errors we expect our test case to trigger. // for space and clarity, we use the fact that by default, all of the struct values are false type exp struct { - cliError bool + cliParseError bool + cliReadError bool consolidationError bool validationErrors bool logWarning bool //TODO: remove in the next version? 
@@ -229,11 +205,11 @@ var configConsolidationTestCases = []configConsolidationTestCase{ // Check that no options will result in 1 VU 1 iter value for execution {opts{}, exp{}, verifyOneIterPerOneVU}, // Verify some CLI errors - {opts{cli: []string{"--blah", "blah"}}, exp{cliError: true}, nil}, - {opts{cli: []string{"--duration", "blah"}}, exp{cliError: true}, nil}, - {opts{cli: []string{"--iterations", "blah"}}, exp{cliError: true}, nil}, - {opts{cli: []string{"--execution", ""}}, exp{cliError: true}, nil}, - {opts{cli: []string{"--stage", "10:20s"}}, exp{cliError: true}, nil}, + {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, // Check if CLI shortcuts generate correct execution values {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, @@ -261,36 +237,40 @@ var configConsolidationTestCases = []configConsolidationTestCase{ } func TestConfigConsolidation(t *testing.T) { - logHook := simpleLogrusHook{levels: []log.Level{log.WarnLevel}} + // This test and its subtests shouldn't be ran in parallel, since they unfortunately have + // to mess with shared global objects (env vars, variables, the log, ... santa?) 
+ logHook := testutils.SimpleLogrusHook{HookedLevels: []log.Level{log.WarnLevel}} log.AddHook(&logHook) log.SetOutput(ioutil.Discard) defer log.SetOutput(os.Stderr) runTestCase := func(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet) { t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) - logHook.drain() + logHook.Drain() restoreEnv := setEnv(t, testCase.options.env) defer restoreEnv() - //TODO: also remove these hacks when we improve the configuration... - getTestCaseCliConf := func() (Config, error) { - if err := flagSet.Parse(testCase.options.cli); err != nil { - return Config{}, err - } - if flagSet.Lookup("out") != nil { - return getConfig(flagSet) - } - opts, errOpts := getOptions(flagSet) - return Config{Options: opts}, errOpts + cliErr := flagSet.Parse(testCase.options.cli) + if testCase.expected.cliParseError { + require.Error(t, cliErr) + return } + require.NoError(t, cliErr) - cliConf, err := getTestCaseCliConf() - if testCase.expected.cliError { - require.Error(t, err) + //TODO: remove these hacks when we improve the configuration... 
+ var cliConf Config + if flagSet.Lookup("out") != nil { + cliConf, cliErr = getConfig(flagSet) + } else { + opts, errOpts := getOptions(flagSet) + cliConf, cliErr = Config{Options: opts}, errOpts + } + if testCase.expected.cliReadError { + require.Error(t, cliErr) return } - require.NoError(t, err) + require.NoError(t, cliErr) var runner lib.Runner if testCase.options.runner != nil { @@ -304,7 +284,7 @@ func TestConfigConsolidation(t *testing.T) { } require.NoError(t, err) - warnings := logHook.drain() + warnings := logHook.Drain() if testCase.expected.logWarning { assert.NotEmpty(t, warnings) } else { diff --git a/lib/testutils/logrus_hook.go b/lib/testutils/logrus_hook.go new file mode 100644 index 00000000000..4a355bf8ba0 --- /dev/null +++ b/lib/testutils/logrus_hook.go @@ -0,0 +1,39 @@ +package testutils + +import ( + "sync" + + log "github.com/sirupsen/logrus" +) + +// SimpleLogrusHook implements the logrus.Hook interface and could be used to check +// if log messages were outputted +type SimpleLogrusHook struct { + HookedLevels []log.Level + mutex sync.Mutex + messageCache []log.Entry +} + +// Levels just returns whatever was stored in the HookedLevels slice +func (smh *SimpleLogrusHook) Levels() []log.Level { + return smh.HookedLevels +} + +// Fire saves whatever message the logrus library passed in the cache +func (smh *SimpleLogrusHook) Fire(e *log.Entry) error { + smh.mutex.Lock() + defer smh.mutex.Unlock() + smh.messageCache = append(smh.messageCache, *e) + return nil +} + +// Drain returns the currently stored messages and deletes them from the cache +func (smh *SimpleLogrusHook) Drain() []log.Entry { + smh.mutex.Lock() + defer smh.mutex.Unlock() + res := smh.messageCache + smh.messageCache = []log.Entry{} + return res +} + +var _ log.Hook = &SimpleLogrusHook{} From 2c1adf3e3fa37b83d4a90e4924fc9bc8d8d23b83 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 5 Mar 2019 19:14:31 +0200 Subject: [PATCH 18/36] Move TestConfigConsolidation() and its 
helpers to a separate file --- cmd/config_consolidation_test.go | 243 +++++++++++++++++++++++++++++++ cmd/config_test.go | 235 ------------------------------ 2 files changed, 243 insertions(+), 235 deletions(-) create mode 100644 cmd/config_consolidation_test.go diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go new file mode 100644 index 00000000000..cbe0eb64dee --- /dev/null +++ b/cmd/config_consolidation_test.go @@ -0,0 +1,243 @@ +package cmd + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/scheduler" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/types" + log "github.com/sirupsen/logrus" + "github.com/spf13/afero" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + null "gopkg.in/guregu/null.v3" +) + +// A helper funcion for setting arbitrary environment variables and +// restoring the old ones at the end, usually by deferring the returned callback +//TODO: remove these hacks when we improve the configuration... we shouldn't +// have to mess with the global environment at all... 
+func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { + actuallSetEnv := func(env []string, abortOnSetErr bool) { + os.Clearenv() + for _, e := range env { + val := "" + pair := strings.SplitN(e, "=", 2) + if len(pair) > 1 { + val = pair[1] + } + err := os.Setenv(pair[0], val) + if abortOnSetErr { + require.NoError(t, err) + } else if err != nil { + t.Logf( + "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", + err, pair[0], val, + ) + } + } + } + oldEnv := os.Environ() + actuallSetEnv(newEnv, true) + + return func() { + actuallSetEnv(oldEnv, false) + } +} + +func verifyOneIterPerOneVU(t *testing.T, c Config) { + // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.PerVUIteationsConfig{}, sched) + perVuIters, ok := sched.(scheduler.PerVUIteationsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) + assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) + assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) + //TODO: verify shortcut options as well? +} + +func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.SharedIteationsConfig{}, sched) + sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) + require.True(t, ok) + assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) + assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) + //TODO: verify shortcut options as well? 
+ } +} + +func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched) + clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) + assert.Equal(t, null.NewInt(vus, true), clvc.VUs) + assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) + //TODO: verify shortcut options as well? + } +} + +func mostFlagSets() []*pflag.FlagSet { + //TODO: make this unneccesary... currently these are the only commands in which + // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ + // sigh... compromises... + return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} +} + +type opts struct { + cliFlagSets []*pflag.FlagSet + cli []string + env []string + runner *lib.Options + //TODO: test the JSON config as well... after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed +} + +// exp contains the different events or errors we expect our test case to trigger. +// for space and clarity, we use the fact that by default, all of the struct values are false +type exp struct { + cliParseError bool + cliReadError bool + consolidationError bool + validationErrors bool + logWarning bool //TODO: remove in the next version? +} + +// A hell of a complicated test case, that still doesn't test things fully... 
+type configConsolidationTestCase struct { + options opts + expected exp + customValidator func(t *testing.T, c Config) +} + +var configConsolidationTestCases = []configConsolidationTestCase{ + // Check that no options will result in 1 VU 1 iter value for execution + {opts{}, exp{}, verifyOneIterPerOneVU}, + // Verify some CLI errors + {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, + // Check if CLI shortcuts generate correct execution values + {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, + {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, + //TODO: verify stages + // This should get a validation error since VUs are more than the shared iterations + {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, + // These should emit a warning + {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, + // Test if environment variable shortcuts are working as expected + {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, 
verifyConstantLoopingVUs(10, 20*time.Second)}, + + //TODO: test combinations between options and levels + //TODO: test the future full overwriting of the duration/iterations/stages/execution options + + // Just in case, verify that no options will result in the same 1 vu 1 iter config + {opts{}, exp{}, verifyOneIterPerOneVU}, + //TODO: test for differences between flagsets + //TODO: more tests in general... +} + +func runTestCase(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet, logHook *testutils.SimpleLogrusHook) { + t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) + logHook.Drain() + + restoreEnv := setEnv(t, testCase.options.env) + defer restoreEnv() + + cliErr := flagSet.Parse(testCase.options.cli) + if testCase.expected.cliParseError { + require.Error(t, cliErr) + return + } + require.NoError(t, cliErr) + + //TODO: remove these hacks when we improve the configuration... + var cliConf Config + if flagSet.Lookup("out") != nil { + cliConf, cliErr = getConfig(flagSet) + } else { + opts, errOpts := getOptions(flagSet) + cliConf, cliErr = Config{Options: opts}, errOpts + } + if testCase.expected.cliReadError { + require.Error(t, cliErr) + return + } + require.NoError(t, cliErr) + + var runner lib.Runner + if testCase.options.runner != nil { + runner = &lib.MiniRunner{Options: *testCase.options.runner} + } + fs := afero.NewMemMapFs() //TODO: test JSON configs as well! 
+ result, err := getConsolidatedConfig(fs, cliConf, runner) + if testCase.expected.consolidationError { + require.Error(t, err) + return + } + require.NoError(t, err) + + warnings := logHook.Drain() + if testCase.expected.logWarning { + assert.NotEmpty(t, warnings) + } else { + assert.Empty(t, warnings) + } + + validationErrors := result.Validate() + if testCase.expected.validationErrors { + assert.NotEmpty(t, validationErrors) + } else { + assert.Empty(t, validationErrors) + } + + if testCase.customValidator != nil { + testCase.customValidator(t, result) + } +} + +func TestConfigConsolidation(t *testing.T) { + // This test and its subtests shouldn't be ran in parallel, since they unfortunately have + // to mess with shared global objects (env vars, variables, the log, ... santa?) + logHook := testutils.SimpleLogrusHook{HookedLevels: []log.Level{log.WarnLevel}} + log.AddHook(&logHook) + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + for tcNum, testCase := range configConsolidationTestCases { + flagSets := testCase.options.cliFlagSets + if flagSets == nil { // handle the most common case + flagSets = mostFlagSets() + } + for fsNum, flagSet := range flagSets { + // I want to paralelize this, but I cannot... due to global variables and other + // questionable architectural choices... 
:| + t.Run( + fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum), + func(t *testing.T) { runTestCase(t, testCase, flagSet, &logHook) }, + ) + } + } +} diff --git a/cmd/config_test.go b/cmd/config_test.go index 3cc01b1fd7d..270de7fa5a4 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -21,25 +21,11 @@ package cmd import ( - "fmt" - "io/ioutil" "os" - "strings" "testing" - "time" - - "github.com/loadimpact/k6/lib/scheduler" - "github.com/loadimpact/k6/lib/testutils" - "github.com/loadimpact/k6/lib/types" - - "github.com/spf13/afero" - "github.com/spf13/pflag" "github.com/kelseyhightower/envconfig" - "github.com/loadimpact/k6/lib" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" ) @@ -97,227 +83,6 @@ func TestConfigCmd(t *testing.T) { } } -// A helper funcion for setting arbitrary environment variables and -// restoring the old ones at the end, usually by deferring the returned callback -//TODO: remove these hacks when we improve the configuration... we shouldn't -// have to mess with the global environment at all... 
-func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { - actuallSetEnv := func(env []string, abortOnSetErr bool) { - os.Clearenv() - for _, e := range env { - val := "" - pair := strings.SplitN(e, "=", 2) - if len(pair) > 1 { - val = pair[1] - } - err := os.Setenv(pair[0], val) - if abortOnSetErr { - require.NoError(t, err) - } else if err != nil { - t.Logf( - "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", - err, pair[0], val, - ) - } - } - } - oldEnv := os.Environ() - actuallSetEnv(newEnv, true) - - return func() { - actuallSetEnv(oldEnv, false) - } -} - -func verifyOneIterPerOneVU(t *testing.T, c Config) { - // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.PerVUIteationsConfig{}, sched) - perVuIters, ok := sched.(scheduler.PerVUIteationsConfig) - require.True(t, ok) - assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) - assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) - assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) - //TODO: verify shortcut options as well? -} - -func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { - return func(t *testing.T, c Config) { - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.SharedIteationsConfig{}, sched) - sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) - require.True(t, ok) - assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) - assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) - //TODO: verify shortcut options as well? 
- } -} - -func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { - return func(t *testing.T, c Config) { - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched) - clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) - require.True(t, ok) - assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) - assert.Equal(t, null.NewInt(vus, true), clvc.VUs) - assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) - //TODO: verify shortcut options as well? - } -} - -func mostFlagSets() []*pflag.FlagSet { - //TODO: make this unneccesary... currently these are the only commands in which - // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ - // sigh... compromises... - return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} -} - -type opts struct { - cliFlagSets []*pflag.FlagSet - cli []string - env []string - runner *lib.Options - //TODO: test the JSON config as well... after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed -} - -// exp contains the different events or errors we expect our test case to trigger. -// for space and clarity, we use the fact that by default, all of the struct values are false -type exp struct { - cliParseError bool - cliReadError bool - consolidationError bool - validationErrors bool - logWarning bool //TODO: remove in the next version? -} - -// A hell of a complicated test case, that still doesn't test things fully... 
-type configConsolidationTestCase struct { - options opts - expected exp - customValidator func(t *testing.T, c Config) -} - -var configConsolidationTestCases = []configConsolidationTestCase{ - // Check that no options will result in 1 VU 1 iter value for execution - {opts{}, exp{}, verifyOneIterPerOneVU}, - // Verify some CLI errors - {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, - // Check if CLI shortcuts generate correct execution values - {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, - {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, - {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, - {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, - //TODO: verify stages - // This should get a validation error since VUs are more than the shared iterations - {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, - // These should emit a warning - {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, - // Test if environment variable shortcuts are working as expected - {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, - {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, 
verifyConstantLoopingVUs(10, 20*time.Second)}, - - //TODO: test combinations between options and levels - //TODO: test the future full overwriting of the duration/iterations/stages/execution options - - // Just in case, verify that no options will result in the same 1 vu 1 iter config - {opts{}, exp{}, verifyOneIterPerOneVU}, - //TODO: test for differences between flagsets - //TODO: more tests in general... -} - -func TestConfigConsolidation(t *testing.T) { - // This test and its subtests shouldn't be ran in parallel, since they unfortunately have - // to mess with shared global objects (env vars, variables, the log, ... santa?) - logHook := testutils.SimpleLogrusHook{HookedLevels: []log.Level{log.WarnLevel}} - log.AddHook(&logHook) - log.SetOutput(ioutil.Discard) - defer log.SetOutput(os.Stderr) - - runTestCase := func(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet) { - t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) - logHook.Drain() - - restoreEnv := setEnv(t, testCase.options.env) - defer restoreEnv() - - cliErr := flagSet.Parse(testCase.options.cli) - if testCase.expected.cliParseError { - require.Error(t, cliErr) - return - } - require.NoError(t, cliErr) - - //TODO: remove these hacks when we improve the configuration... - var cliConf Config - if flagSet.Lookup("out") != nil { - cliConf, cliErr = getConfig(flagSet) - } else { - opts, errOpts := getOptions(flagSet) - cliConf, cliErr = Config{Options: opts}, errOpts - } - if testCase.expected.cliReadError { - require.Error(t, cliErr) - return - } - require.NoError(t, cliErr) - - var runner lib.Runner - if testCase.options.runner != nil { - runner = &lib.MiniRunner{Options: *testCase.options.runner} - } - fs := afero.NewMemMapFs() //TODO: test JSON configs as well! 
- result, err := getConsolidatedConfig(fs, cliConf, runner) - if testCase.expected.consolidationError { - require.Error(t, err) - return - } - require.NoError(t, err) - - warnings := logHook.Drain() - if testCase.expected.logWarning { - assert.NotEmpty(t, warnings) - } else { - assert.Empty(t, warnings) - } - - validationErrors := result.Validate() - if testCase.expected.validationErrors { - assert.NotEmpty(t, validationErrors) - } else { - assert.Empty(t, validationErrors) - } - - if testCase.customValidator != nil { - testCase.customValidator(t, result) - } - } - - for tcNum, testCase := range configConsolidationTestCases { - flagSets := testCase.options.cliFlagSets - if flagSets == nil { // handle the most common case - flagSets = mostFlagSets() - } - for fsNum, flagSet := range flagSets { - // I want to paralelize this, but I cannot... due to global variables and other - // questionable architectural choices... :| - t.Run( - fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum), - func(t *testing.T) { runTestCase(t, testCase, flagSet) }, - ) - } - } -} func TestConfigEnv(t *testing.T) { testdata := map[struct{ Name, Key string }]map[string]func(Config){ {"Linger", "K6_LINGER"}: { From 62ff4aad4cc22beccaf6b0c3783568c3b47a90f4 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 5 Mar 2019 21:52:35 +0200 Subject: [PATCH 19/36] Fix a typo in a comment --- cmd/config_consolidation_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index cbe0eb64dee..be6edc7ce54 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -93,7 +93,7 @@ func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing } func mostFlagSets() []*pflag.FlagSet { - //TODO: make this unneccesary... currently these are the only commands in which + //TODO: make this unnecessary... 
currently these are the only commands in which // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ // sigh... compromises... return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} From 704dcc334bc86cad441d6d03db0a62156e0446f5 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 6 Mar 2019 10:50:41 +0200 Subject: [PATCH 20/36] Fix the duplicate config file path CLI flag --- cmd/archive.go | 1 - cmd/config.go | 57 +++++++++++++++---------------------------- cmd/login_cloud.go | 4 +-- cmd/login_influxdb.go | 4 +-- cmd/root.go | 15 ++++++++---- 5 files changed, 34 insertions(+), 47 deletions(-) diff --git a/cmd/archive.go b/cmd/archive.go index 8263715870f..893e71afef2 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -96,7 +96,6 @@ func archiveCmdFlagSet() *pflag.FlagSet { flags.SortFlags = false flags.AddFlagSet(optionFlagSet()) flags.AddFlagSet(runtimeOptionFlagSet(false)) - flags.AddFlagSet(configFileFlagSet()) //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ flags.StringVarP(&archiveOut, "archive-out", "O", archiveOut, "archive output filename") return flags diff --git a/cmd/config.go b/cmd/config.go index 7d92536f2dd..cf0854f0761 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -22,7 +22,6 @@ package cmd import ( "encoding/json" - "io/ioutil" "os" "github.com/kelseyhightower/envconfig" @@ -34,26 +33,12 @@ import ( "github.com/loadimpact/k6/stats/kafka" "github.com/loadimpact/k6/stats/statsd/common" "github.com/pkg/errors" - "github.com/shibukawa/configdir" log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/pflag" null "gopkg.in/guregu/null.v3" ) -const configFilename = "config.json" - -var configDirs = configdir.New("loadimpact", "k6") -var configFile = os.Getenv("K6_CONFIG") // overridden by `-c` flag! - -// configFileFlagSet returns a FlagSet that contains flags needed for specifying a config file. 
-func configFileFlagSet() *pflag.FlagSet { - //TODO: remove? - flags := pflag.NewFlagSet("", 0) - flags.StringVarP(&configFile, "config", "c", configFile, "specify config file to read") - return flags -} - // configFlagSet returns a FlagSet with the default run configuration flags. func configFlagSet() *pflag.FlagSet { flags := pflag.NewFlagSet("", 0) @@ -63,7 +48,6 @@ func configFlagSet() *pflag.FlagSet { flags.Bool("no-usage-report", false, "don't send anonymous stats to the developers") flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") - flags.AddFlagSet(configFileFlagSet()) return flags } @@ -130,41 +114,40 @@ func getConfig(flags *pflag.FlagSet) (Config, error) { }, nil } -// Reads a configuration file from disk. -func readDiskConfig(fs afero.Fs) (Config, *configdir.Config, error) { - if configFile != "" { - data, err := ioutil.ReadFile(configFile) - if err != nil { - return Config{}, nil, err - } - var conf Config - err = json.Unmarshal(data, &conf) - return conf, nil, err +// Reads a configuration file from disk and returns it and its path. +func readDiskConfig(fs afero.Fs) (Config, string, error) { + realConfigFilePath := configFilePath + if realConfigFilePath == "" { + // The user didn't specify K6_CONFIG or --config, use the default path + realConfigFilePath = defaultConfigFilePath } - cdir := configDirs.QueryFolderContainsFile(configFilename) - if cdir == nil { - return Config{}, configDirs.QueryFolders(configdir.Global)[0], nil + // Try to see if the file exists in the supplied filesystem + if _, err := fs.Stat(realConfigFilePath); err != nil { + if os.IsNotExist(err) && configFilePath == "" { + // If the file doesn't exist, but it was the default config file (i.e. 
the user + // didn't specify anything), silence the error + err = nil + } + return Config{}, realConfigFilePath, err } - data, err := cdir.ReadFile(configFilename) + + data, err := afero.ReadFile(fs, realConfigFilePath) if err != nil { - return Config{}, cdir, err + return Config{}, realConfigFilePath, err } var conf Config err = json.Unmarshal(data, &conf) - return conf, cdir, err + return conf, realConfigFilePath, err } // Writes configuration back to disk. -func writeDiskConfig(fs afero.Fs, cdir *configdir.Config, conf Config) error { +func writeDiskConfig(fs afero.Fs, configPath string, conf Config) error { data, err := json.MarshalIndent(conf, "", " ") if err != nil { return err } - if configFile != "" { - return afero.WriteFile(fs, configFilename, data, 0644) - } - return cdir.WriteFile(configFilename, data) + return afero.WriteFile(fs, configPath, data, 0644) } // Reads configuration variables from the environment. diff --git a/cmd/login_cloud.go b/cmd/login_cloud.go index c3523003887..2cbc101541f 100644 --- a/cmd/login_cloud.go +++ b/cmd/login_cloud.go @@ -57,7 +57,7 @@ This will set the default token used when just "k6 run -o cloud" is passed.`, return err } - currentDiskConf, cdir, err := readDiskConfig(fs) + currentDiskConf, configPath, err := readDiskConfig(fs) if err != nil { return err } @@ -109,7 +109,7 @@ This will set the default token used when just "k6 run -o cloud" is passed.`, } currentDiskConf.Collectors.Cloud = newCloudConf - if err := writeDiskConfig(fs, cdir, currentDiskConf); err != nil { + if err := writeDiskConfig(fs, configPath, currentDiskConf); err != nil { return err } diff --git a/cmd/login_influxdb.go b/cmd/login_influxdb.go index 1ff785babe6..134e471fdaf 100644 --- a/cmd/login_influxdb.go +++ b/cmd/login_influxdb.go @@ -42,7 +42,7 @@ This will set the default server used when just "-o influxdb" is passed.`, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { fs := afero.NewOsFs() - config, cdir, err 
:= readDiskConfig(fs) + config, configPath, err := readDiskConfig(fs) if err != nil { return err } @@ -105,7 +105,7 @@ This will set the default server used when just "-o influxdb" is passed.`, } config.Collectors.InfluxDB = conf - return writeDiskConfig(fs, cdir, config) + return writeDiskConfig(fs, configPath, config) }, } diff --git a/cmd/root.go b/cmd/root.go index 715e2cb2f78..0b03980e111 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -54,9 +54,12 @@ var ( stderr = consoleWriter{colorable.NewColorableStderr(), stderrTTY, outMutex} ) -var ( - cfgFile string +const defaultConfigFileName = "config.json" + +var defaultConfigFilePath = defaultConfigFileName // Updated with the user's config folder in the init() function below +var configFilePath = os.Getenv("K6_CONFIG") // Overridden by `-c`/`--config` flag! +var ( verbose bool quiet bool noColor bool @@ -94,10 +97,12 @@ func Execute() { } func init() { - defaultConfigPathMsg := "" + // TODO: find a better library... or better yet, simply port the few dozen lines of code for getting the + // per-user config folder in a cross-platform way + configDirs := configdir.New("loadimpact", "k6") configFolders := configDirs.QueryFolders(configdir.Global) if len(configFolders) > 0 { - defaultConfigPathMsg = fmt.Sprintf(" (default %s)", filepath.Join(configFolders[0].Path, configFilename)) + defaultConfigFilePath = filepath.Join(configFolders[0].Path, defaultConfigFileName) } //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ @@ -106,7 +111,7 @@ func init() { RootCmd.PersistentFlags().BoolVar(&noColor, "no-color", false, "disable colored output") RootCmd.PersistentFlags().StringVar(&logFmt, "logformat", "", "log output format") RootCmd.PersistentFlags().StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") - RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file"+defaultConfigPathMsg) //TODO: figure out and fix? 
+ RootCmd.PersistentFlags().StringVarP(&configFilePath, "config", "c", "", fmt.Sprintf("config file (default %s)", defaultConfigFilePath)) must(cobra.MarkFlagFilename(RootCmd.PersistentFlags(), "config")) } From a7d0b6fe9db6b97b0252afc9c81fc36cc3265afa Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 6 Mar 2019 10:54:03 +0200 Subject: [PATCH 21/36] Preserve the trailing spaces in the k6 ASCII banner --- cmd/root.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 0b03980e111..e61545c8a52 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -26,6 +26,7 @@ import ( golog "log" "os" "path/filepath" + "strings" "sync" "github.com/fatih/color" @@ -37,13 +38,13 @@ import ( ) var Version = "0.23.1" -var Banner = ` - /\ |‾‾| /‾‾/ /‾/ - /\ / \ | |_/ / / / - / \/ \ | | / ‾‾\ - / \ | |‾\ \ | (_) | - / __________ \ |__| \__\ \___/ .io` - +var Banner = strings.Join([]string{ + ` /\ |‾‾| /‾‾/ /‾/ `, + ` /\ / \ | |_/ / / / `, + ` / \/ \ | | / ‾‾\ `, + ` / \ | |‾\ \ | (_) | `, + ` / __________ \ |__| \__\ \___/ .io`, +}, "\n") var BannerColor = color.New(color.FgCyan) var ( From b86d5cd9a804e99e79072015a7854f58616650ca Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 6 Mar 2019 11:02:07 +0200 Subject: [PATCH 22/36] Automatically create the config file's parent folder --- cmd/config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/config.go b/cmd/config.go index cf0854f0761..e18dcd91e46 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -23,6 +23,7 @@ package cmd import ( "encoding/json" "os" + "path/filepath" "github.com/kelseyhightower/envconfig" "github.com/loadimpact/k6/lib" @@ -147,6 +148,11 @@ func writeDiskConfig(fs afero.Fs, configPath string, conf Config) error { if err != nil { return err } + + if err := fs.MkdirAll(filepath.Dir(configPath), 0755); err != nil { + return err + } + return afero.WriteFile(fs, configPath, data, 0644) } From c8c902a6f3609cfa61d8790a21fa9aaad942c574 Mon 
Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 6 Mar 2019 11:09:49 +0200 Subject: [PATCH 23/36] Add a missing copyright notice --- cmd/config.go | 2 +- cmd/config_consolidation_test.go | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/cmd/config.go b/cmd/config.go index e18dcd91e46..a158d6e206d 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -1,7 +1,7 @@ /* * * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact + * Copyright (C) 2019 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index be6edc7ce54..54e78925e94 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -1,3 +1,22 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ package cmd import ( From 13e506dc0d7a0836bfc460dec856734623f45484 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 12:05:42 +0200 Subject: [PATCH 24/36] Fix the newline before the banner --- cmd/cloud.go | 3 ++- cmd/root.go | 2 +- cmd/run.go | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/cloud.go b/cmd/cloud.go index 27f3c01c6fc..e0344778c71 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -55,7 +55,8 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud k6 cloud script.js`[1:], Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"), RunE: func(cmd *cobra.Command, args []string) error { - _, _ = BannerColor.Fprint(stdout, Banner+"\n\n") + //TODO: disable in quiet mode? + _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", Banner) initBar := ui.ProgressBar{ Width: 60, Left: func() string { return " uploading script" }, diff --git a/cmd/root.go b/cmd/root.go index e61545c8a52..92dc0b1c1cc 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -72,7 +72,7 @@ var ( var RootCmd = &cobra.Command{ Use: "k6", Short: "a next-generation load generator", - Long: BannerColor.Sprint(Banner), + Long: BannerColor.Sprintf("\n%s", Banner), SilenceUsage: true, SilenceErrors: true, PersistentPreRun: func(cmd *cobra.Command, args []string) { diff --git a/cmd/run.go b/cmd/run.go index 6e1c1612906..5e108a4768e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -100,7 +100,8 @@ a commandline interface for interacting with it.`, k6 run -o influxdb=http://1.2.3.4:8086/k6`[1:], Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"), RunE: func(cmd *cobra.Command, args []string) error { - _, _ = BannerColor.Fprint(stdout, Banner+"\n\n") + //TODO: disable in quiet mode? 
+ _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", Banner) initBar := ui.ProgressBar{ Width: 60, From 5d8b35474d64fceba70f586b9194051f3b84128d Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 12:06:35 +0200 Subject: [PATCH 25/36] Move the root CLI persistent flags to their own flagset --- cmd/root.go | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 92dc0b1c1cc..c5521ae824e 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -35,6 +35,7 @@ import ( "github.com/shibukawa/configdir" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/spf13/pflag" ) var Version = "0.23.1" @@ -97,6 +98,19 @@ func Execute() { } } +func rootCmdPersistentFlagSet() *pflag.FlagSet { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + flags.BoolVarP(&verbose, "verbose", "v", false, "enable debug logging") + flags.BoolVarP(&quiet, "quiet", "q", false, "disable progress updates") + flags.BoolVar(&noColor, "no-color", false, "disable colored output") + flags.StringVar(&logFmt, "logformat", "", "log output format") + flags.StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") + flags.StringVarP(&configFilePath, "config", "c", "", fmt.Sprintf("config file (default %s)", defaultConfigFilePath)) + must(cobra.MarkFlagFilename(flags, "config")) + return flags +} + func init() { // TODO: find a better library... or better yet, simply port the few dozen lines of code for getting the // per-user config folder in a cross-platform way @@ -106,14 +120,7 @@ func init() { defaultConfigFilePath = filepath.Join(configFolders[0].Path, defaultConfigFileName) } - //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... 
:/ - RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "enable debug logging") - RootCmd.PersistentFlags().BoolVarP(&quiet, "quiet", "q", false, "disable progress updates") - RootCmd.PersistentFlags().BoolVar(&noColor, "no-color", false, "disable colored output") - RootCmd.PersistentFlags().StringVar(&logFmt, "logformat", "", "log output format") - RootCmd.PersistentFlags().StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") - RootCmd.PersistentFlags().StringVarP(&configFilePath, "config", "c", "", fmt.Sprintf("config file (default %s)", defaultConfigFilePath)) - must(cobra.MarkFlagFilename(RootCmd.PersistentFlags(), "config")) + RootCmd.PersistentFlags().AddFlagSet(rootCmdPersistentFlagSet()) } // fprintf panics when where's an error writing to the supplied io.Writer From 588ef66108f97ed94116e4267d22e5b3ad3242a4 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 12:54:10 +0200 Subject: [PATCH 26/36] Fix an env.var/CLI flag conflict and issues with CLI usage messages --- cmd/cloud.go | 10 +++++++++- cmd/root.go | 8 +++++++- cmd/run.go | 13 ++++++++++++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/cmd/cloud.go b/cmd/cloud.go index e0344778c71..49178a91a81 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -242,8 +242,16 @@ func cloudCmdFlagSet() *pflag.FlagSet { flags.SortFlags = false flags.AddFlagSet(optionFlagSet()) flags.AddFlagSet(runtimeOptionFlagSet(false)) - //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + + //TODO: Figure out a better way to handle the CLI flags: + // - the default value is specified in this way so we don't overwrire whatever + // was specified via the environment variable + // - global variables are not very testable... 
:/ flags.BoolVar(&exitOnRunning, "exit-on-running", exitOnRunning, "exits when test reaches the running status") + // We also need to explicitly set the default value for the usage message here, so setting + // K6_EXIT_ON_RUNNING=true won't affect the usage message + flags.Lookup("exit-on-running").DefValue = "false" + return flags } diff --git a/cmd/root.go b/cmd/root.go index c5521ae824e..05337b37981 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -62,6 +62,7 @@ var defaultConfigFilePath = defaultConfigFileName // Updated with the user's con var configFilePath = os.Getenv("K6_CONFIG") // Overridden by `-c`/`--config` flag! var ( + //TODO: have environment variables for configuring these? hopefully after we move away from global vars though... verbose bool quiet bool noColor bool @@ -106,7 +107,12 @@ func rootCmdPersistentFlagSet() *pflag.FlagSet { flags.BoolVar(&noColor, "no-color", false, "disable colored output") flags.StringVar(&logFmt, "logformat", "", "log output format") flags.StringVarP(&address, "address", "a", "localhost:6565", "address for the api server") - flags.StringVarP(&configFilePath, "config", "c", "", fmt.Sprintf("config file (default %s)", defaultConfigFilePath)) + + //TODO: Fix... 
This default value needed, so both CLI flags and environment variables work + flags.StringVarP(&configFilePath, "config", "c", configFilePath, "JSON config file") + // And we also need to explicitly set the default value for the usage message here, so things + // like `K6_CONFIG="blah" k6 run -h` don't produce a weird usage message + flags.Lookup("config").DefValue = defaultConfigFilePath must(cobra.MarkFlagFilename(flags, "config")) return flags } diff --git a/cmd/run.go b/cmd/run.go index 5e108a4768e..d9eccbeb962 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -481,10 +481,21 @@ func runCmdFlagSet() *pflag.FlagSet { flags.AddFlagSet(optionFlagSet()) flags.AddFlagSet(runtimeOptionFlagSet(true)) flags.AddFlagSet(configFlagSet()) - //TODO: figure out a better way to handle the CLI flags - global variables are not very testable... :/ + + //TODO: Figure out a better way to handle the CLI flags: + // - the default values are specified in this way so we don't overwrire whatever + // was specified via the environment variables + // - but we need to manually specify the DefValue, since that's the default value + // that will be used in the help/usage message - if we don't set it, the environment + // variables will affect the usage message + // - and finally, global variables are not very testable... :/ flags.StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") + flags.Lookup("type").DefValue = "" flags.BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") + falseStr := "false" // avoiding goconst warnings... 
+ flags.Lookup("no-setup").DefValue = falseStr flags.BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") + flags.Lookup("no-teardown").DefValue = falseStr return flags } From e76972a40766e9b1d52bd6116aa56adf9b8d1f1c Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 16:57:04 +0200 Subject: [PATCH 27/36] Improve the CLI flags test framework in the config consolidation test --- cmd/config_consolidation_test.go | 73 ++++++++++++++++++++++++++------ 1 file changed, 61 insertions(+), 12 deletions(-) diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index 54e78925e94..66bb2264bf0 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -21,6 +21,7 @@ package cmd import ( "fmt" + "io" "io/ioutil" "os" "strings" @@ -111,21 +112,63 @@ func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing } } -func mostFlagSets() []*pflag.FlagSet { +func mostFlagSets() []flagSetInit { //TODO: make this unnecessary... currently these are the only commands in which // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ // sigh... compromises... - return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()} + result := []flagSetInit{} + for i, fsi := range []flagSetInit{runCmdFlagSet, archiveCmdFlagSet, cloudCmdFlagSet} { + i, fsi := i, fsi // go... + result = append(result, func() *pflag.FlagSet { + flags := pflag.NewFlagSet(fmt.Sprintf("superContrivedFlags_%d", i), pflag.ContinueOnError) + flags.AddFlagSet(rootCmdPersistentFlagSet()) + flags.AddFlagSet(fsi()) + return flags + }) + } + return result } +type flagSetInit func() *pflag.FlagSet + type opts struct { - cliFlagSets []*pflag.FlagSet - cli []string - env []string - runner *lib.Options - //TODO: test the JSON config as well... 
after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed + cli []string + env []string + runner *lib.Options + + //TODO: remove this when the configuration is more reproducible and sane... + // We use a func, because initializing a FlagSet that points to variables + // actually will change those variables to their default values :| In our + // case, this happens only some of the time, for global variables that + // are configurable only via CLI flags, but not environment variables. + // + // For the rest, their default value is their current value, since that + // has been set from the environment variable. That has a bunch of other + // issues on its own, and the func() doesn't help at all, and we need to + // use the resetStickyGlobalVars() hack on top of that... + cliFlagSetInits []flagSetInit +} + +func resetStickyGlobalVars() { + //TODO: remove after fixing the config, obviously a dirty hack + exitOnRunning = false + configFilePath = "" + runType = "" + runNoSetup = false + runNoTeardown = false } +// Something that makes the test also be a valid io.Writer, useful for passing it +// as an output for logs and CLI flag help messages... +type testOutput struct{ *testing.T } + +func (to testOutput) Write(p []byte) (n int, err error) { + to.Logf("%s", p) + return len(p), nil +} + +var _ io.Writer = testOutput{} + // exp contains the different events or errors we expect our test case to trigger. // for space and clarity, we use the fact that by default, all of the struct values are false type exp struct { @@ -178,13 +221,19 @@ var configConsolidationTestCases = []configConsolidationTestCase{ //TODO: more tests in general... 
} -func runTestCase(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet, logHook *testutils.SimpleLogrusHook) { +func runTestCase(t *testing.T, testCase configConsolidationTestCase, newFlagSet flagSetInit, logHook *testutils.SimpleLogrusHook) { t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) + log.SetOutput(testOutput{t}) logHook.Drain() restoreEnv := setEnv(t, testCase.options.env) defer restoreEnv() + flagSet := newFlagSet() + defer resetStickyGlobalVars() + flagSet.SetOutput(testOutput{t}) + flagSet.PrintDefaults() + cliErr := flagSet.Parse(testCase.options.cli) if testCase.expected.cliParseError { require.Error(t, cliErr) @@ -246,11 +295,11 @@ func TestConfigConsolidation(t *testing.T) { defer log.SetOutput(os.Stderr) for tcNum, testCase := range configConsolidationTestCases { - flagSets := testCase.options.cliFlagSets - if flagSets == nil { // handle the most common case - flagSets = mostFlagSets() + flagSetInits := testCase.options.cliFlagSetInits + if flagSetInits == nil { // handle the most common case + flagSetInits = mostFlagSets() } - for fsNum, flagSet := range flagSets { + for fsNum, flagSet := range flagSetInits { // I want to paralelize this, but I cannot... due to global variables and other // questionable architectural choices... 
:| t.Run( From 295abd2792acd20f6733a46858c794c9c1188ac5 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 17:24:09 +0200 Subject: [PATCH 28/36] Add support for testing the JSON config in the consolidation as well --- cmd/config_consolidation_test.go | 105 ++++++++++++++++++++----------- 1 file changed, 69 insertions(+), 36 deletions(-) diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index 66bb2264bf0..be68ea23375 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -129,12 +129,25 @@ func mostFlagSets() []flagSetInit { return result } +type file struct { + filepath, contents string +} + +func getFS(files []file) afero.Fs { + fs := afero.NewMemMapFs() + for _, f := range files { + must(afero.WriteFile(fs, f.filepath, []byte(f.contents), 0644)) // modes don't matter in the afero.MemMapFs + } + return fs +} + type flagSetInit func() *pflag.FlagSet type opts struct { cli []string env []string runner *lib.Options + fs afero.Fs //TODO: remove this when the configuration is more reproducible and sane... 
// We use a func, because initializing a FlagSet that points to variables @@ -186,39 +199,55 @@ type configConsolidationTestCase struct { customValidator func(t *testing.T, c Config) } -var configConsolidationTestCases = []configConsolidationTestCase{ - // Check that no options will result in 1 VU 1 iter value for execution - {opts{}, exp{}, verifyOneIterPerOneVU}, - // Verify some CLI errors - {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, - {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, - // Check if CLI shortcuts generate correct execution values - {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, - {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, - {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, - {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, - //TODO: verify stages - // This should get a validation error since VUs are more than the shared iterations - {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, - // These should emit a warning - {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, - // Test if environment variable shortcuts are working as expected - {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, - {opts{env: 
[]string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, - - //TODO: test combinations between options and levels - //TODO: test the future full overwriting of the duration/iterations/stages/execution options - - // Just in case, verify that no options will result in the same 1 vu 1 iter config - {opts{}, exp{}, verifyOneIterPerOneVU}, - //TODO: test for differences between flagsets - //TODO: more tests in general... +func getConfigConsolidationTestCases() []configConsolidationTestCase { + // This is a function, because some of these test cases actually need for the init() functions + // to be executed, since they depend on defaultConfigFilePath + return []configConsolidationTestCase{ + // Check that no options will result in 1 VU 1 iter value for execution + {opts{}, exp{}, verifyOneIterPerOneVU}, + // Verify some CLI errors + {opts{cli: []string{"--blah", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--duration", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--iterations", "blah"}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, + {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, + // Check if CLI shortcuts generate correct execution values + {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, + {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, + //TODO: verify stages + // This should get a validation error since VUs are more than the shared iterations + {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, + // These should emit a warning + {opts{cli: []string{"-u", "1", "-i", "6", 
"-d", "10s"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, + {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, + // Test if environment variable shortcuts are working as expected + {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, + //TODO: more env var tests + + // Test if JSON configs work as expected + {opts{fs: getFS([]file{{defaultConfigFilePath, `{"iterations": 77, "vus": 7}`}})}, exp{}, verifySharedIters(7, 77)}, + {opts{fs: getFS([]file{{defaultConfigFilePath, `wrong-json`}})}, exp{consolidationError: true}, nil}, + {opts{fs: getFS(nil), cli: []string{"--config", "/my/config.file"}}, exp{consolidationError: true}, nil}, + { + opts{ + fs: getFS([]file{{"/my/config.file", `{"vus": 8, "duration": "2m"}`}}), + cli: []string{"--config", "/my/config.file"}, + }, exp{}, verifyConstantLoopingVUs(8, 120*time.Second), + }, + + //TODO: test combinations between options and levels + //TODO: test the future full overwriting of the duration/iterations/stages/execution options + + // Just in case, verify that no options will result in the same 1 vu 1 iter config + {opts{}, exp{}, verifyOneIterPerOneVU}, + //TODO: test for differences between flagsets + //TODO: more tests in general... + } } func runTestCase(t *testing.T, testCase configConsolidationTestCase, newFlagSet flagSetInit, logHook *testutils.SimpleLogrusHook) { @@ -259,8 +288,12 @@ func runTestCase(t *testing.T, testCase configConsolidationTestCase, newFlagSet if testCase.options.runner != nil { runner = &lib.MiniRunner{Options: *testCase.options.runner} } - fs := afero.NewMemMapFs() //TODO: test JSON configs as well! 
- result, err := getConsolidatedConfig(fs, cliConf, runner) + if testCase.options.fs == nil { + t.Logf("Creating an empty FS for this test") + testCase.options.fs = afero.NewMemMapFs() // create an empty FS if it wasn't supplied + } + + result, err := getConsolidatedConfig(testCase.options.fs, cliConf, runner) if testCase.expected.consolidationError { require.Error(t, err) return @@ -294,7 +327,7 @@ func TestConfigConsolidation(t *testing.T) { log.SetOutput(ioutil.Discard) defer log.SetOutput(os.Stderr) - for tcNum, testCase := range configConsolidationTestCases { + for tcNum, testCase := range getConfigConsolidationTestCases() { flagSetInits := testCase.options.cliFlagSetInits if flagSetInits == nil { // handle the most common case flagSetInits = mostFlagSets() From 9eb4379bd60a4dab2694eff9de660c0192f136ea Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Thu, 7 Mar 2019 17:29:03 +0200 Subject: [PATCH 29/36] Improve the comments on the functions that deal with the file config --- cmd/config.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index a158d6e206d..8da45b8d1fe 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -115,7 +115,12 @@ func getConfig(flags *pflag.FlagSet) (Config, error) { }, nil } -// Reads a configuration file from disk and returns it and its path. +// Reads the configuration file from the supplied filesystem and returns it and its path. +// It will first try to see if the user eplicitly specified a custom config file and will +// try to read that. If there's a custom config specified and it couldn't be read or parsed, +// an error will be returned. +// If there's no custom config specified and no file exists in the default config path, it will +// return an empty config struct, the default config location and *no* error. 
func readDiskConfig(fs afero.Fs) (Config, string, error) { realConfigFilePath := configFilePath if realConfigFilePath == "" { @@ -142,7 +147,8 @@ func readDiskConfig(fs afero.Fs) (Config, string, error) { return conf, realConfigFilePath, err } -// Writes configuration back to disk. +// Serializes the configuration to a JSON file and writes it in the supplied +// location on the supplied filesystem func writeDiskConfig(fs afero.Fs, configPath string, conf Config) error { data, err := json.MarshalIndent(conf, "", " ") if err != nil { From 2de09235632189ed50eb2588971cb551f0ac95a0 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Fri, 8 Mar 2019 16:24:52 +0200 Subject: [PATCH 30/36] Add tests and fix a minor bug --- cmd/config.go | 22 +++-- cmd/config_consolidation_test.go | 136 +++++++++++++++++++++++++++---- 2 files changed, 133 insertions(+), 25 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 8da45b8d1fe..96f411efeea 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -116,7 +116,7 @@ func getConfig(flags *pflag.FlagSet) (Config, error) { } // Reads the configuration file from the supplied filesystem and returns it and its path. -// It will first try to see if the user eplicitly specified a custom config file and will +// It will first try to see if the user explicitly specified a custom config file and will // try to read that. If there's a custom config specified and it couldn't be read or parsed, // an error will be returned. 
// If there's no custom config specified and no file exists in the default config path, it will @@ -235,15 +235,19 @@ func buildExecutionConfig(conf Config) (Config, error) { ds.VUs = conf.VUs ds.Iterations = conf.Iterations result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} - } else if conf.Execution != nil { - //TODO: remove this warning in the next version - log.Warnf("The execution settings are not functional in this k6 release, they will be ignored") } else { - // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config - // with 1 VU and 1 iteration. We're choosing the per-VU config, since that one could also - // be executed both locally, and in the cloud. - result.Execution = scheduler.ConfigMap{ - lib.DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(lib.DefaultSchedulerName), + if conf.Execution != nil { // If someone set this, regardless if its empty + //TODO: remove this warning in the next version + log.Warnf("The execution settings are not functional in this k6 release, they will be ignored") + } + + if len(conf.Execution) == 0 { // If unset or set to empty + // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config + // with 1 VU and 1 iteration. We're choosing the per-VU config, since that one could also + // be executed both locally, and in the cloud. 
+ result.Execution = scheduler.ConfigMap{ + lib.DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(lib.DefaultSchedulerName), + } } } diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index be68ea23375..0fe6afebe6e 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -82,7 +82,6 @@ func verifyOneIterPerOneVU(t *testing.T, c Config) { assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible) assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) - //TODO: verify shortcut options as well? } func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { @@ -94,11 +93,12 @@ func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { require.True(t, ok) assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) - //TODO: verify shortcut options as well? + assert.Equal(t, null.NewInt(vus, true), c.VUs) + assert.Equal(t, null.NewInt(iters, true), c.Iterations) } } -func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { +func verifyConstLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -108,10 +108,47 @@ func verifyConstantLoopingVUs(vus int64, duration time.Duration) func(t *testing assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) assert.Equal(t, null.NewInt(vus, true), clvc.VUs) assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) - //TODO: verify shortcut options as well? 
+ assert.Equal(t, null.NewInt(vus, true), c.VUs) + assert.Equal(t, types.NullDurationFrom(duration), c.Duration) } } +func verifyVarLoopingVUs(startVus int64, startVUsSet bool, stages []scheduler.Stage) func(t *testing.T, c Config) { + return func(t *testing.T, c Config) { + sched := c.Execution[lib.DefaultSchedulerName] + require.NotEmpty(t, sched) + require.IsType(t, scheduler.VariableLoopingVUsConfig{}, sched) + clvc, ok := sched.(scheduler.VariableLoopingVUsConfig) + require.True(t, ok) + assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) + assert.Equal(t, null.NewInt(startVus, startVUsSet), clvc.StartVUs) + assert.Equal(t, null.NewInt(startVus, startVUsSet), c.VUs) + assert.Equal(t, stages, clvc.Stages) + assert.Len(t, c.Stages, len(stages)) + for i, s := range stages { + assert.Equal(t, s.Duration, c.Stages[i].Duration) + assert.Equal(t, s.Target, c.Stages[i].Target) + } + } +} + +// A helper function that accepts (duration in second, VUs) pairs and returns +// a valid slice of stage structs +func buildStages(durationsAndVUs ...int64) []scheduler.Stage { + l := len(durationsAndVUs) + if l%2 != 0 { + panic("wrong len") + } + result := make([]scheduler.Stage, 0, l/2) + for i := 0; i < l; i += 2 { + result = append(result, scheduler.Stage{ + Duration: types.NullDurationFrom(time.Duration(durationsAndVUs[i]) * time.Second), + Target: null.IntFrom(durationsAndVUs[i+1]), + }) + } + return result +} + func mostFlagSets() []flagSetInit { //TODO: make this unnecessary... 
currently these are the only commands in which // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ @@ -141,6 +178,10 @@ func getFS(files []file) afero.Fs { return fs } +func defaultConfig(jsonConfig string) afero.Fs { + return getFS([]file{{defaultConfigFilePath, jsonConfig}}) +} + type flagSetInit func() *pflag.FlagSet type opts struct { @@ -214,43 +255,106 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { // Check if CLI shortcuts generate correct execution values {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, - {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)}, - {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)}, - //TODO: verify stages + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstLoopingVUs(3, 30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstLoopingVUs(4, 1*time.Minute)}, + { + opts{cli: []string{"--stage", "20s:10", "-s", "3m:5"}}, exp{}, + verifyVarLoopingVUs(1, false, buildStages(20, 10, 180, 5)), + }, + { + opts{cli: []string{"-s", "1m6s:5", "--vus", "10"}}, exp{}, + verifyVarLoopingVUs(10, true, buildStages(66, 5)), + }, // This should get a validation error since VUs are more than the shared iterations {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, // These should emit a warning + //TODO: in next version, those should be an error {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil}, {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, 
nil}, + { + opts{runner: &lib.Options{ + VUs: null.IntFrom(5), + Duration: types.NullDurationFrom(44 * time.Second), + Iterations: null.IntFrom(10), + }}, exp{logWarning: true}, nil, + }, + {opts{fs: defaultConfig(`{"execution": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU}, // Test if environment variable shortcuts are working as expected {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, - {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)}, - //TODO: more env var tests - + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstLoopingVUs(10, 20*time.Second)}, + { + opts{env: []string{"K6_STAGES=2m30s:11,1h1m:100"}}, exp{}, + verifyVarLoopingVUs(1, false, buildStages(150, 11, 3660, 100)), + }, + { + opts{env: []string{"K6_STAGES=100s:100,0m30s:0", "K6_VUS=0"}}, exp{}, + verifyVarLoopingVUs(0, true, buildStages(100, 100, 30, 0)), + }, // Test if JSON configs work as expected - {opts{fs: getFS([]file{{defaultConfigFilePath, `{"iterations": 77, "vus": 7}`}})}, exp{}, verifySharedIters(7, 77)}, - {opts{fs: getFS([]file{{defaultConfigFilePath, `wrong-json`}})}, exp{consolidationError: true}, nil}, + {opts{fs: defaultConfig(`{"iterations": 77, "vus": 7}`)}, exp{}, verifySharedIters(7, 77)}, + {opts{fs: defaultConfig(`wrong-json`)}, exp{consolidationError: true}, nil}, {opts{fs: getFS(nil), cli: []string{"--config", "/my/config.file"}}, exp{consolidationError: true}, nil}, + + // Test combinations between options and levels { opts{ fs: getFS([]file{{"/my/config.file", `{"vus": 8, "duration": "2m"}`}}), cli: []string{"--config", "/my/config.file"}, - }, exp{}, verifyConstantLoopingVUs(8, 120*time.Second), + }, exp{}, verifyConstLoopingVUs(8, 120*time.Second), + }, + { + opts{ + runner: &lib.Options{VUs: null.IntFrom(5), Duration: types.NullDurationFrom(50 * time.Second)}, + cli: []string{"--iterations", "5"}, + }, + //TODO: this shouldn't be a warning in 
the next version, but the result will be different + exp{logWarning: true}, verifyConstLoopingVUs(5, 50*time.Second), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 10}]}`), + runner: &lib.Options{VUs: null.IntFrom(5)}, + }, + exp{}, + verifyVarLoopingVUs(5, true, buildStages(20, 10)), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 10}]}`), + runner: &lib.Options{VUs: null.IntFrom(5)}, + env: []string{"K6_VUS=15", "K6_ITERATIONS=15"}, + }, + exp{logWarning: true}, //TODO: this won't be a warning in the next version, but the result will be different + verifyVarLoopingVUs(15, true, buildStages(20, 10)), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "11s", "target": 11}]}`), + runner: &lib.Options{VUs: null.IntFrom(22)}, + env: []string{"K6_VUS=33"}, + cli: []string{"--stage", "44s:44", "-s", "55s:55"}, + }, + exp{}, + verifyVarLoopingVUs(33, true, buildStages(44, 44, 55, 55)), }, - //TODO: test combinations between options and levels //TODO: test the future full overwriting of the duration/iterations/stages/execution options // Just in case, verify that no options will result in the same 1 vu 1 iter config {opts{}, exp{}, verifyOneIterPerOneVU}, //TODO: test for differences between flagsets - //TODO: more tests in general... + //TODO: more tests in general, especially ones not related to execution parameters... 
} } -func runTestCase(t *testing.T, testCase configConsolidationTestCase, newFlagSet flagSetInit, logHook *testutils.SimpleLogrusHook) { +func runTestCase( + t *testing.T, + testCase configConsolidationTestCase, + newFlagSet flagSetInit, + logHook *testutils.SimpleLogrusHook, +) { t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) log.SetOutput(testOutput{t}) logHook.Drain() From 079282a2b5d6ce1004004a81f943c8e214e08744 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 11 Mar 2019 13:50:17 +0200 Subject: [PATCH 31/36] Fix or suppress linter errors --- cmd/config.go | 12 ++++++++---- cmd/config_consolidation_test.go | 1 + cmd/root.go | 11 ++++++++++- lib/scheduler/configmap.go | 1 + lib/scheduler/helpers_test.go | 5 ++++- lib/scheduler/schedulers_test.go | 1 + 6 files changed, 25 insertions(+), 6 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 96f411efeea..1c57dad30be 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -180,7 +180,8 @@ func readEnvConfig() (conf Config, err error) { // stages) into the proper scheduler configuration func buildExecutionConfig(conf Config) (Config, error) { result := conf - if conf.Duration.Valid { + switch { + case conf.Duration.Valid: if conf.Iterations.Valid { //TODO: make this an error in the next version log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions") @@ -206,7 +207,8 @@ func buildExecutionConfig(conf Config) (Config, error) { ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} } - } else if conf.Stages != nil { + + case conf.Stages != nil: if conf.Iterations.Valid { //TODO: make this an error in the next version log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") @@ -225,7 +227,8 @@ func buildExecutionConfig(conf Config) (Config, error) { } 
ds.Interruptible = null.NewBool(true, false) // Preserve backwards compatibility result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} - } else if conf.Iterations.Valid { + + case conf.Iterations.Valid: if conf.Execution != nil { return conf, errors.New("specifying both iterations and execution is not supported") } @@ -235,7 +238,8 @@ func buildExecutionConfig(conf Config) (Config, error) { ds.VUs = conf.VUs ds.Iterations = conf.Iterations result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} - } else { + + default: if conf.Execution != nil { // If someone set this, regardless if its empty //TODO: remove this warning in the next version log.Warnf("The execution settings are not functional in this k6 release, they will be ignored") diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index 0fe6afebe6e..82a619ce5f0 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -439,6 +439,7 @@ func TestConfigConsolidation(t *testing.T) { for fsNum, flagSet := range flagSetInits { // I want to paralelize this, but I cannot... due to global variables and other // questionable architectural choices... :| + testCase, flagSet := testCase, flagSet t.Run( fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum), func(t *testing.T) { runTestCase(t, testCase, flagSet, &logHook) }, diff --git a/cmd/root.go b/cmd/root.go index 05337b37981..e5f5d558485 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -38,7 +38,13 @@ import ( "github.com/spf13/pflag" ) +// Version contains the current semantic version of k6. 
+//nolint:gochecknoglobals var Version = "0.23.1" + +// Banner contains the ASCII-art banner with the k6 logo and stylized website URL +//TODO: make these into methods, only the version needs to be a variable +//nolint:gochecknoglobals var Banner = strings.Join([]string{ ` /\ |‾‾| /‾‾/ /‾/ `, ` /\ / \ | |_/ / / / `, @@ -58,8 +64,11 @@ var ( const defaultConfigFileName = "config.json" +//TODO: remove these global variables +//nolint:gochecknoglobals var defaultConfigFilePath = defaultConfigFileName // Updated with the user's config folder in the init() function below -var configFilePath = os.Getenv("K6_CONFIG") // Overridden by `-c`/`--config` flag! +//nolint:gochecknoglobals +var configFilePath = os.Getenv("K6_CONFIG") // Overridden by `-c`/`--config` flag! var ( //TODO: have environment variables for configuring these? hopefully after we move away from global vars though... diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go index 2445e6a1931..072f26f81e7 100644 --- a/lib/scheduler/configmap.go +++ b/lib/scheduler/configmap.go @@ -33,6 +33,7 @@ type ConfigMap map[string]Config // with the specified name and all default values correctly initialized type ConfigConstructor func(name string, rawJSON []byte) (Config, error) +//nolint:gochecknoglobals var ( configTypesMutex sync.RWMutex configConstructors = make(map[string]ConfigConstructor) diff --git a/lib/scheduler/helpers_test.go b/lib/scheduler/helpers_test.go index 6c07869b6e4..8165f922fd6 100644 --- a/lib/scheduler/helpers_test.go +++ b/lib/scheduler/helpers_test.go @@ -58,7 +58,10 @@ func TestStrictJSONUnmarshal(t *testing.T) { {`123`, true, &someElement{}, nil}, {`"blah"`, true, &someElement{}, nil}, {`null`, false, &someElement{}, &someElement{}}, - {`{"data": 123, "props": {"test": "mest"}}`, false, &someElement{}, &someElement{123, map[string]string{"test": "mest"}}}, + { + `{"data": 123, "props": {"test": "mest"}}`, false, &someElement{}, + &someElement{123, map[string]string{"test": 
"mest"}}, + }, {`{"data": 123, "props": {"test": "mest"}}asdg`, true, &someElement{}, nil}, } for i, tc := range testCases { diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index 4f64394069e..cbecca214f4 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -39,6 +39,7 @@ type configMapTestCase struct { customValidator func(t *testing.T, cm ConfigMap) } +//nolint:lll,gochecknoglobals var configMapTestCases = []configMapTestCase{ {"", true, false, nil}, {"1234", true, false, nil}, From 238e224116c52fc6fc4eb284691ce798ef5fb392 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 11 Mar 2019 14:55:09 +0200 Subject: [PATCH 32/36] Use a custom error type for execution conflict errors in the config --- cmd/config.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 1c57dad30be..31e28a53d35 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -33,7 +33,6 @@ import ( "github.com/loadimpact/k6/stats/influxdb" "github.com/loadimpact/k6/stats/kafka" "github.com/loadimpact/k6/stats/statsd/common" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/pflag" @@ -176,6 +175,14 @@ func readEnvConfig() (conf Config, err error) { return conf, nil } +type executionConflictConfigError string + +func (e executionConflictConfigError) Error() string { + return string(e) +} + +var _ error = executionConflictConfigError("") + // This checks for conflicting options and turns any shortcut options (i.e. 
duration, iterations, // stages) into the proper scheduler configuration func buildExecutionConfig(conf Config) (Config, error) { @@ -183,22 +190,21 @@ func buildExecutionConfig(conf Config) (Config, error) { switch { case conf.Duration.Valid: if conf.Iterations.Valid { - //TODO: make this an error in the next version + //TODO: make this an executionConflictConfigError in the next version log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions") } if conf.Stages != nil { - //TODO: make this an error in the next version + //TODO: make this an executionConflictConfigError in the next version log.Warnf("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions") } if conf.Execution != nil { - //TODO: use a custom error type - return result, errors.New("specifying both duration and execution is not supported") + return result, executionConflictConfigError("specifying both duration and execution is not supported") } if conf.Duration.Duration <= 0 { - //TODO: make this an error in the next version + //TODO: make this an executionConflictConfigError in the next version log.Warnf("Specifying infinite duration in this way is deprecated and won't be supported in the future k6 versions") } else { ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) @@ -210,12 +216,12 @@ func buildExecutionConfig(conf Config) (Config, error) { case conf.Stages != nil: if conf.Iterations.Valid { - //TODO: make this an error in the next version + //TODO: make this an executionConflictConfigError in the next version log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") } if conf.Execution != nil { - return conf, errors.New("specifying both stages and execution is not supported") + return conf, executionConflictConfigError("specifying both stages and execution is not supported") } ds := 
scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName) @@ -230,7 +236,7 @@ func buildExecutionConfig(conf Config) (Config, error) { case conf.Iterations.Valid: if conf.Execution != nil { - return conf, errors.New("specifying both iterations and execution is not supported") + return conf, executionConflictConfigError("specifying both iterations and execution is not supported") } // TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations? From 6d19e615a3526704dcdf99c0e248d6812488d89a Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 12 Mar 2019 15:13:41 +0200 Subject: [PATCH 33/36] Improve config consolidation, default values and tests --- cmd/config.go | 4 +- cmd/config_consolidation_test.go | 66 +++++++++++++++----------- lib/scheduler/base_config.go | 4 +- lib/scheduler/constant_arrival_rate.go | 2 +- lib/scheduler/constant_looping_vus.go | 11 +++-- lib/scheduler/schedulers_test.go | 4 +- 6 files changed, 52 insertions(+), 39 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 31e28a53d35..ba2e696624d 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -194,7 +194,7 @@ func buildExecutionConfig(conf Config) (Config, error) { log.Warnf("Specifying both duration and iterations is deprecated and won't be supported in the future k6 versions") } - if conf.Stages != nil { + if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty //TODO: make this an executionConflictConfigError in the next version log.Warnf("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions") } @@ -214,7 +214,7 @@ func buildExecutionConfig(conf Config) (Config, error) { result.Execution = scheduler.ConfigMap{lib.DefaultSchedulerName: ds} } - case conf.Stages != nil: + case len(conf.Stages) > 0: // stages isn't nil (not set) and isn't explicitly set to empty if conf.Iterations.Valid { //TODO: make this an executionConflictConfigError in the next version 
log.Warnf("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index 82a619ce5f0..5f5451e4f52 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -84,21 +84,21 @@ func verifyOneIterPerOneVU(t *testing.T, c Config) { assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) } -func verifySharedIters(vus, iters int64) func(t *testing.T, c Config) { +func verifySharedIters(vus, iters null.Int) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) require.IsType(t, scheduler.SharedIteationsConfig{}, sched) sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) require.True(t, ok) - assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs) - assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations) - assert.Equal(t, null.NewInt(vus, true), c.VUs) - assert.Equal(t, null.NewInt(iters, true), c.Iterations) + assert.Equal(t, vus, sharedIterConfig.VUs) + assert.Equal(t, iters, sharedIterConfig.Iterations) + assert.Equal(t, vus, c.VUs) + assert.Equal(t, iters, c.Iterations) } } -func verifyConstLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, c Config) { +func verifyConstLoopingVUs(vus null.Int, duration time.Duration) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -106,14 +106,14 @@ func verifyConstLoopingVUs(vus int64, duration time.Duration) func(t *testing.T, clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) require.True(t, ok) assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) - assert.Equal(t, null.NewInt(vus, true), clvc.VUs) + assert.Equal(t, vus, clvc.VUs) assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) - assert.Equal(t, null.NewInt(vus, true), 
c.VUs) + assert.Equal(t, vus, c.VUs) assert.Equal(t, types.NullDurationFrom(duration), c.Duration) } } -func verifyVarLoopingVUs(startVus int64, startVUsSet bool, stages []scheduler.Stage) func(t *testing.T, c Config) { +func verifyVarLoopingVUs(startVus null.Int, stages []scheduler.Stage) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { sched := c.Execution[lib.DefaultSchedulerName] require.NotEmpty(t, sched) @@ -121,8 +121,8 @@ func verifyVarLoopingVUs(startVus int64, startVUsSet bool, stages []scheduler.St clvc, ok := sched.(scheduler.VariableLoopingVUsConfig) require.True(t, ok) assert.Equal(t, null.NewBool(true, false), clvc.Interruptible) - assert.Equal(t, null.NewInt(startVus, startVUsSet), clvc.StartVUs) - assert.Equal(t, null.NewInt(startVus, startVUsSet), c.VUs) + assert.Equal(t, startVus, clvc.StartVUs) + assert.Equal(t, startVus, c.VUs) assert.Equal(t, stages, clvc.Stages) assert.Len(t, c.Stages, len(stages)) for i, s := range stages { @@ -241,6 +241,7 @@ type configConsolidationTestCase struct { } func getConfigConsolidationTestCases() []configConsolidationTestCase { + I := null.IntFrom // shortcut for "Valid" (i.e. 
user-specified) ints // This is a function, because some of these test cases actually need for the init() functions // to be executed, since they depend on defaultConfigFilePath return []configConsolidationTestCase{ @@ -253,20 +254,21 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { {opts{cli: []string{"--execution", ""}}, exp{cliParseError: true}, nil}, {opts{cli: []string{"--stage", "10:20s"}}, exp{cliReadError: true}, nil}, // Check if CLI shortcuts generate correct execution values - {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)}, - {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)}, - {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstLoopingVUs(3, 30*time.Second)}, - {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstLoopingVUs(4, 1*time.Minute)}, + {opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(I(1), I(5))}, + {opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(I(2), I(6))}, + {opts{cli: []string{"-d", "123s"}}, exp{}, verifyConstLoopingVUs(null.NewInt(1, false), 123*time.Second)}, + {opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstLoopingVUs(I(3), 30*time.Second)}, + {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstLoopingVUs(I(4), 1*time.Minute)}, { opts{cli: []string{"--stage", "20s:10", "-s", "3m:5"}}, exp{}, - verifyVarLoopingVUs(1, false, buildStages(20, 10, 180, 5)), + verifyVarLoopingVUs(null.NewInt(1, false), buildStages(20, 10, 180, 5)), }, { opts{cli: []string{"-s", "1m6s:5", "--vus", "10"}}, exp{}, - verifyVarLoopingVUs(10, true, buildStages(66, 5)), + verifyVarLoopingVUs(null.NewInt(10, true), buildStages(66, 5)), }, // This should get a validation error since VUs are more than the shared iterations - {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)}, + {opts{cli: []string{"--vus", 
"10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(I(10), I(6))}, // These should emit a warning //TODO: in next version, those should be an error {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil}, @@ -282,18 +284,18 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { }, {opts{fs: defaultConfig(`{"execution": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU}, // Test if environment variable shortcuts are working as expected - {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)}, - {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstLoopingVUs(10, 20*time.Second)}, + {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(I(5), I(15))}, + {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstLoopingVUs(I(10), 20*time.Second)}, { opts{env: []string{"K6_STAGES=2m30s:11,1h1m:100"}}, exp{}, - verifyVarLoopingVUs(1, false, buildStages(150, 11, 3660, 100)), + verifyVarLoopingVUs(null.NewInt(1, false), buildStages(150, 11, 3660, 100)), }, { opts{env: []string{"K6_STAGES=100s:100,0m30s:0", "K6_VUS=0"}}, exp{}, - verifyVarLoopingVUs(0, true, buildStages(100, 100, 30, 0)), + verifyVarLoopingVUs(null.NewInt(0, true), buildStages(100, 100, 30, 0)), }, // Test if JSON configs work as expected - {opts{fs: defaultConfig(`{"iterations": 77, "vus": 7}`)}, exp{}, verifySharedIters(7, 77)}, + {opts{fs: defaultConfig(`{"iterations": 77, "vus": 7}`)}, exp{}, verifySharedIters(I(7), I(77))}, {opts{fs: defaultConfig(`wrong-json`)}, exp{consolidationError: true}, nil}, {opts{fs: getFS(nil), cli: []string{"--config", "/my/config.file"}}, exp{consolidationError: true}, nil}, @@ -302,7 +304,15 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { opts{ fs: getFS([]file{{"/my/config.file", `{"vus": 8, "duration": "2m"}`}}), cli: []string{"--config", "/my/config.file"}, - }, exp{}, verifyConstLoopingVUs(8, 
120*time.Second), + }, exp{}, verifyConstLoopingVUs(I(8), 120*time.Second), + }, + { + opts{ + fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 20}], "vus": 10}`), + env: []string{"K6_DURATION=15s"}, + cli: []string{"--stage", ""}, + }, + exp{}, verifyConstLoopingVUs(I(10), 15*time.Second), }, { opts{ @@ -310,7 +320,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { cli: []string{"--iterations", "5"}, }, //TODO: this shouldn't be a warning in the next version, but the result will be different - exp{logWarning: true}, verifyConstLoopingVUs(5, 50*time.Second), + exp{logWarning: true}, verifyConstLoopingVUs(I(5), 50*time.Second), }, { opts{ @@ -318,7 +328,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { runner: &lib.Options{VUs: null.IntFrom(5)}, }, exp{}, - verifyVarLoopingVUs(5, true, buildStages(20, 10)), + verifyVarLoopingVUs(null.NewInt(5, true), buildStages(20, 10)), }, { opts{ @@ -327,7 +337,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { env: []string{"K6_VUS=15", "K6_ITERATIONS=15"}, }, exp{logWarning: true}, //TODO: this won't be a warning in the next version, but the result will be different - verifyVarLoopingVUs(15, true, buildStages(20, 10)), + verifyVarLoopingVUs(null.NewInt(15, true), buildStages(20, 10)), }, { opts{ @@ -337,7 +347,7 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { cli: []string{"--stage", "44s:44", "-s", "55s:55"}, }, exp{}, - verifyVarLoopingVUs(33, true, buildStages(44, 44, 55, 55)), + verifyVarLoopingVUs(null.NewInt(33, true), buildStages(44, 44, 55, 55)), }, //TODO: test the future full overwriting of the duration/iterations/stages/execution options diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go index efba8cb1c3a..73a4c22a4b4 100644 --- a/lib/scheduler/base_config.go +++ b/lib/scheduler/base_config.go @@ -77,8 +77,8 @@ func (bc BaseConfig) Validate() (errors []error) { errors = 
append(errors, fmt.Errorf("exec value cannot be empty")) } // The actually reasonable checks: - if bc.StartTime.Valid && bc.StartTime.Duration < 0 { - errors = append(errors, fmt.Errorf("scheduler start time should be positive")) + if bc.StartTime.Duration < 0 { + errors = append(errors, fmt.Errorf("scheduler start time can't be negative")) } iterTimeout := time.Duration(bc.IterationTimeout.Duration) if iterTimeout < 0 || iterTimeout > maxIterationTimeout { diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go index 0234c3a3465..ed53297e924 100644 --- a/lib/scheduler/constant_arrival_rate.go +++ b/lib/scheduler/constant_arrival_rate.go @@ -69,7 +69,7 @@ func (carc ConstantArrivalRateConfig) Validate() []error { if !carc.Rate.Valid { errors = append(errors, fmt.Errorf("the iteration rate isn't specified")) } else if carc.Rate.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the iteration rate should be positive")) + errors = append(errors, fmt.Errorf("the iteration rate should be more than 0")) } if time.Duration(carc.TimeUnit.Duration) <= 0 { diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go index dd67e9cdf17..a6293cb1b17 100644 --- a/lib/scheduler/constant_looping_vus.go +++ b/lib/scheduler/constant_looping_vus.go @@ -51,7 +51,10 @@ type ConstantLoopingVUsConfig struct { // NewConstantLoopingVUsConfig returns a ConstantLoopingVUsConfig with default values func NewConstantLoopingVUsConfig(name string) ConstantLoopingVUsConfig { - return ConstantLoopingVUsConfig{BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false)} + return ConstantLoopingVUsConfig{ + BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false), + VUs: null.NewInt(1, false), + } } // Make sure we implement the Config interface @@ -60,10 +63,8 @@ var _ Config = &ConstantLoopingVUsConfig{} // Validate makes sure all options are configured and valid func (lcv ConstantLoopingVUsConfig) Validate() []error { 
errors := lcv.BaseConfig.Validate() - if !lcv.VUs.Valid { - errors = append(errors, fmt.Errorf("the number of VUs isn't specified")) - } else if lcv.VUs.Int64 < 0 { - errors = append(errors, fmt.Errorf("the number of VUs shouldn't be negative")) + if lcv.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) } if !lcv.Duration.Valid { diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go index cbecca214f4..11c07453f64 100644 --- a/lib/scheduler/schedulers_test.go +++ b/lib/scheduler/schedulers_test.go @@ -76,10 +76,12 @@ var configMapTestCases = []configMapTestCase{ assert.Equal(t, int64(10), cm["someKey"].GetMaxVUs()) assert.Empty(t, cm["someKey"].Validate()) }}, + {`{"aname": {"type": "constant-looping-vus", "duration": "60s"}}`, false, false, nil}, {`{"": {"type": "constant-looping-vus", "vus": 10, "duration": "60s"}}`, false, true, nil}, {`{"aname": {"type": "constant-looping-vus"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 0.5}}`, true, false, nil}, {`{"aname": {"type": "constant-looping-vus", "vus": 10}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "duration": "60s"}}`, false, true, nil}, + {`{"aname": {"type": "constant-looping-vus", "vus": 0, "duration": "60s"}}`, false, true, nil}, {`{"aname": {"type": "constant-looping-vus", "vus": -1, "duration": "60s"}}`, false, true, nil}, {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "0s"}}`, false, true, nil}, {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "startTime": "-10s"}}`, false, true, nil}, From 9404af6b206676ff1e65478e3d081cc7c613017e Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 12 Mar 2019 15:53:13 +0200 Subject: [PATCH 34/36] Extend config validation to the cloud and archive subcommands --- cmd/archive.go | 4 ++++ cmd/cloud.go | 4 ++++ cmd/config.go | 21 +++++++++++++++++++++ cmd/run.go | 17 ++++++----------- 4 files 
changed, 35 insertions(+), 11 deletions(-) diff --git a/cmd/archive.go b/cmd/archive.go index 893e71afef2..3fe9734cd8d 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -76,6 +76,10 @@ An archive is a fully self-contained test run, and can be executed identically e return err } + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} + } + err = r.SetOptions(conf.Options) if err != nil { return err diff --git a/cmd/cloud.go b/cmd/cloud.go index 49178a91a81..89f71aaf0a6 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -95,6 +95,10 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud return err } + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} + } + err = r.SetOptions(conf.Options) if err != nil { return err diff --git a/cmd/config.go b/cmd/config.go index ba2e696624d..5afb2d30ca8 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -22,8 +22,12 @@ package cmd import ( "encoding/json" + "fmt" "os" "path/filepath" + "strings" + + "errors" "github.com/kelseyhightower/envconfig" "github.com/loadimpact/k6/lib" @@ -300,3 +304,20 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf return buildExecutionConfig(conf) } + +func validateConfig(conf Config) error { + errList := conf.Validate() + if len(errList) == 0 { + return nil + } + + errMsgParts := []string{"There were problems with the specified script configuration:"} + for _, err := range errList { + errMsgParts = append(errMsgParts, fmt.Sprintf("\t- %s", err.Error())) + } + errMsg := errors.New(strings.Join(errMsgParts, "\n")) + + //TODO: actually return the error here instead of warning, so k6 aborts on config validation errors + log.Warn(errMsg) + return nil +} diff --git a/cmd/run.go b/cmd/run.go index d9eccbeb962..c0fcba608ec 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -62,7 +62,7 @@ const ( teardownTimeoutErrorCode = 101 genericTimeoutErrorCode = 102 
genericEngineErrorCode = 103 - //invalidOptionsErrorCode = 104 + invalidConfigErrorCode = 104 ) var ( @@ -164,21 +164,16 @@ a commandline interface for interacting with it.`, ) } + //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method + // If duration is explicitly set to 0, it means run forever. + //TODO: just... handle this differently, e.g. as a part of the manual executor if conf.Duration.Valid && conf.Duration.Duration == 0 { conf.Duration = types.NullDuration{} } - //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method - if errList := conf.Validate(); len(errList) != 0 { - - errMsg := []string{"There were problems with the specified script configuration:"} - for _, err := range errList { - errMsg = append(errMsg, fmt.Sprintf("\t- %s", err.Error())) - } - //TODO: re-enable exiting with validation errors - log.Warn(errors.New(strings.Join(errMsg, "\n"))) - //return ExitCode{errors.New(strings.Join(errMsg, "\n")), invalidOptionsErrorCode} + if cerr := validateConfig(conf); cerr != nil { + return ExitCode{cerr, invalidConfigErrorCode} } // If summary trend stats are defined, update the UI to reflect them From 249812174ff92e68891daaa202847d60ab093e56 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 12 Mar 2019 15:59:56 +0200 Subject: [PATCH 35/36] Silence a linter warning... for now! 
--- cmd/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/config.go b/cmd/config.go index 5afb2d30ca8..139b1e01286 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -305,6 +305,8 @@ func getConsolidatedConfig(fs afero.Fs, cliConf Config, runner lib.Runner) (conf return buildExecutionConfig(conf) } +//TODO: remove ↓ +//nolint:unparam func validateConfig(conf Config) error { errList := conf.Validate() if len(errList) == 0 { From d88be34caa1935042f5134de17e24e71a6bb8187 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Wed, 13 Mar 2019 11:02:17 +0200 Subject: [PATCH 36/36] Override the execution setting when execution shortcuts are used The `execution` setting is the only one that could cause an error at this time, when both it and a shortcut (duration/iterations/stages) are set, even if it's not used by k6 itself in the PR. So it needs to be zeroed-out when shortcuts are used, otherwise it couldn't be overwritten at all. --- cmd/config_consolidation_test.go | 12 ++++++++++++ lib/options.go | 12 ++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index 5f5451e4f52..fd8918aed3a 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -351,6 +351,18 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { }, //TODO: test the future full overwriting of the duration/iterations/stages/execution options + { + opts{ + fs: defaultConfig(`{ + "execution": { "someKey": { + "type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, + "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc" + }}}`), + env: []string{"K6_ITERATIONS=25"}, + cli: []string{"--vus", "12"}, + }, + exp{}, verifySharedIters(I(12), I(25)), + }, // Just in case, verify that no options will result in the same 1 vu 1 iter config {opts{}, exp{}, verifyOneIterPerOneVU}, diff --git 
a/lib/options.go b/lib/options.go index bf228c0ad63..f4a4657e401 100644 --- a/lib/options.go +++ b/lib/options.go @@ -327,15 +327,15 @@ func (o Options) Apply(opts Options) Options { // Still, if more than one of those options is simultaneously specified in the same // config tier, they will be preserved, so the validation after we've consolidated // all of the options can return an error. - //TODO: uncomment this after we start using the new schedulers - /* - if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || o.Execution != nil { + if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || opts.Execution != nil { + //TODO: uncomment this after we start using the new schedulers + /* o.Duration = types.NewNullDuration(0, false) o.Iterations = null.NewInt(0, false) o.Stages = nil - o.Execution = nil - } - */ + */ + o.Execution = nil + } if opts.Duration.Valid { o.Duration = opts.Duration