/*
*
* k6 - a next-generation load testing tool
* Copyright (C) 2017 Load Impact
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package cloud

import (
	"time"

	"github.com/loadimpact/k6/lib/types"
	"gopkg.in/guregu/null.v3"
)

// Config holds all the necessary data and options for sending metrics to the Load Impact cloud.
type Config struct {
	// TODO: refactor common stuff between cloud execution and output
	Token           null.String `json:"token" envconfig:"CLOUD_TOKEN"`
	DeprecatedToken null.String `json:"-" envconfig:"K6CLOUD_TOKEN"`
	ProjectID       null.Int    `json:"projectID" envconfig:"CLOUD_PROJECT_ID"`
	Name            null.String `json:"name" envconfig:"CLOUD_NAME"`
	Host            null.String `json:"host" envconfig:"CLOUD_HOST"`
	WebAppURL       null.String `json:"webAppURL" envconfig:"CLOUD_WEB_APP_URL"`
	NoCompress      null.Bool   `json:"noCompress" envconfig:"CLOUD_NO_COMPRESS"`

	MaxMetricSamplesPerPackage null.Int `json:"maxMetricSamplesPerPackage" envconfig:"CLOUD_MAX_METRIC_SAMPLES_PER_PACKAGE"`

	// The time interval between periodic API calls for sending samples to the cloud ingest service.
	MetricPushInterval types.NullDuration `json:"metricPushInterval" envconfig:"CLOUD_METRIC_PUSH_INTERVAL"`

	// Aggregation docs:
	//
	// If AggregationPeriod is specified and is greater than 0, HTTP metric aggregation
	// with that period will be enabled. The general algorithm is this:
	//  - HTTP trail samples will be collected separately and not
	//    included in the default sample buffer (which is directly sent
	//    to the cloud service every MetricPushInterval).
	//  - On every AggregationCalcInterval, all collected HTTP trails will be
	//    split into AggregationPeriod-sized time buckets (time slots) and
	//    then into sub-buckets according to their tags (each sub-bucket
	//    will contain only HTTP trails with the same sample tags -
	//    proto, status, URL, method, etc.).
	//  - If at that time the specified AggregationWaitPeriod has not passed
	//    for a particular time bucket, it will be left undisturbed until the next
	//    AggregationCalcInterval tick comes along.
	//  - If AggregationWaitPeriod has passed for a time bucket, all of its
	//    sub-buckets will be traversed. Any sub-buckets that have fewer than
	//    AggregationMinSamples HTTP trails in them will not be aggregated.
	//    Instead, the HTTP trails in them will just be individually added
	//    to the default sample buffer, like they would be if there was no
	//    aggregation.
	//  - Sub-buckets with at least AggregationMinSamples HTTP trails, on the
	//    other hand, will be aggregated according to the algorithm below:
	//     - If AggregationSkipOutlierDetection is enabled, all of the collected
	//       HTTP trails in that sub-bucket will be directly aggregated into a single
	//       compound metric sample, without any attempt at outlier detection.
	//       IMPORTANT: This is intended for testing purposes only or, in
	//       extreme cases, when the resulting metrics' precision isn't very important,
	//       since it could lead to a huge loss of granularity and the masking
	//       of any outlier samples in the data.
	//     - By default (since AggregationSkipOutlierDetection is not enabled),
	//       the collected HTTP trails will be checked for outliers, so we don't lose
	//       granularity by accidentally aggregating them. That happens by finding
	//       the "quartiles" (by default the 75th and 25th percentiles) in the
	//       sub-bucket datapoints and using the inter-quartile range (IQR) to find
	//       any outliers (https://en.wikipedia.org/wiki/Interquartile_range#Outliers,
	//       though the specific parameters and coefficients can be customized
	//       by the AggregationOutlierIqr{Radius,CoefLower,CoefUpper} options).
	//     - Depending on the number of samples in the sub-bucket, one of two different
	//       algorithms could be used to calculate the quartiles. If there are
	//       fewer samples (between AggregationMinSamples and AggregationOutlierAlgoThreshold),
	//       then a more precise but also more computationally-heavy sorting-based algorithm
	//       will be used. For sub-buckets with more samples, a lighter quickselect-based
	//       algorithm will be used, potentially with a very minor loss of precision.
	//     - Regardless of the used algorithm, once the quartiles for that sub-bucket
	//       are found and the IQR is calculated, every HTTP trail in the sub-bucket will
	//       be checked against two different outlier criteria - its total connection time
	//       (i.e. http_req_connecting + http_req_tls_handshaking) and its total request time
	//       (i.e. http_req_sending + http_req_waiting + http_req_receiving). If either of those
	//       properties of an HTTP trail is out of the calculated "normal" bounds for the
	//       sub-bucket (see the illustrative sketch after this struct), it will be considered
	//       an outlier and will be sent to the cloud individually - it's simply added to the
	//       default sample buffer, like it would be if there was no aggregation.
	//  - Finally, all non-outliers are aggregated and the resulting single metric is also
	//    added to the default sample buffer for sending to the cloud ingest service
	//    on the next MetricPushInterval event.

	// If specified and greater than 0, sample aggregation with that period is enabled.
	AggregationPeriod types.NullDuration `json:"aggregationPeriod" envconfig:"CLOUD_AGGREGATION_PERIOD"`

	// If aggregation is enabled, this is how often new HTTP trails will be sorted into buckets and sub-buckets and aggregated.
	AggregationCalcInterval types.NullDuration `json:"aggregationCalcInterval" envconfig:"CLOUD_AGGREGATION_CALC_INTERVAL"`

	// If aggregation is enabled, this specifies how long we'll wait for period samples to accumulate before trying to aggregate them.
	AggregationWaitPeriod types.NullDuration `json:"aggregationWaitPeriod" envconfig:"CLOUD_AGGREGATION_WAIT_PERIOD"`

	// If aggregation is enabled, but the collected samples for a certain AggregationPeriod after AggregationWaitPeriod has passed are fewer than this number, they won't be aggregated.
	AggregationMinSamples null.Int `json:"aggregationMinSamples" envconfig:"CLOUD_AGGREGATION_MIN_SAMPLES"`

	// If this is enabled and a sub-bucket has more than AggregationMinSamples HTTP trails in it, they would all be
	// aggregated without attempting to find and separate any outlier metrics first.
	// IMPORTANT: This is intended for testing purposes only or, in extreme cases, when the result precision
	// isn't very important and the improved aggregation percentage would be worth the potentially huge loss
	// of metric granularity and possible masking of any outlier samples.
	AggregationSkipOutlierDetection null.Bool `json:"aggregationSkipOutlierDetection" envconfig:"CLOUD_AGGREGATION_SKIP_OUTLIER_DETECTION"`

	// If aggregation and outlier detection are enabled, this option specifies the
	// number of HTTP trails in a sub-bucket that determines which quartile-calculating
	// algorithm would be used:
	//  - for fewer samples (between MinSamples and OutlierAlgoThreshold), a more precise
	//    (i.e. supporting interpolation), but also more computationally-heavy sorting
	//    algorithm will be used to find the quartiles.
	//  - if there are more samples than OutlierAlgoThreshold in the sub-bucket, a
	//    QuickSelect-based (https://en.wikipedia.org/wiki/Quickselect) algorithm will
	//    be used. It doesn't support interpolation, so there's a small loss of precision
	//    in the outlier detection, but it's not as resource-heavy as the sorting algorithm.
	AggregationOutlierAlgoThreshold null.Int `json:"aggregationOutlierAlgoThreshold" envconfig:"CLOUD_AGGREGATION_OUTLIER_ALGO_THRESHOLD"`

	// The radius (as a fraction) from the median at which to sample Q1 and Q3.
	// By default it's one quarter (0.25), and if set to something different, the Q in IQR
	// won't make much sense... But this would allow us to select tighter sample groups for
	// aggregation if we want.
	AggregationOutlierIqrRadius null.Float `json:"aggregationOutlierIqrRadius" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_RADIUS"`

	// Connection or request times more than this many IQRs below Q1 are considered non-aggregatable outliers.
	AggregationOutlierIqrCoefLower null.Float `json:"aggregationOutlierIqrCoefLower" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_LOWER"`

	// Connection or request times more than this many IQRs above Q3 are considered non-aggregatable outliers.
	AggregationOutlierIqrCoefUpper null.Float `json:"aggregationOutlierIqrCoefUpper" envconfig:"CLOUD_AGGREGATION_OUTLIER_IQR_COEF_UPPER"`
}
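
// exampleOutlierBounds is an illustrative sketch, not part of the original file:
// given the Q1 and Q3 quartiles of a sub-bucket and the configured IQR coefficients
// (AggregationOutlierIqrCoefLower/Upper), it computes the "normal" bounds described
// in the aggregation docs above. An HTTP trail whose total connection time or total
// request time falls outside [lower, upper] would be treated as an outlier and sent
// to the cloud individually instead of being aggregated.
func exampleOutlierBounds(q1, q3, coefLower, coefUpper float64) (lower, upper float64) {
	iqr := q3 - q1 // the inter-quartile range
	lower = q1 - coefLower*iqr
	upper = q3 + coefUpper*iqr
	return lower, upper
}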

// NewConfig creates a new Config instance with default values for some fields.
func NewConfig() Config {
	return Config{
		Host:                       null.NewString("https://ingest.loadimpact.com", false),
		WebAppURL:                  null.NewString("https://app.loadimpact.com", false),
		MetricPushInterval:         types.NewNullDuration(1*time.Second, false),
		MaxMetricSamplesPerPackage: null.NewInt(100000, false),
		// Aggregation is disabled by default, since AggregationPeriod has no default value,
		// but if it's enabled manually or from the cloud service, those are the default values it will use:
		AggregationCalcInterval:         types.NewNullDuration(3*time.Second, false),
		AggregationWaitPeriod:           types.NewNullDuration(5*time.Second, false),
		AggregationMinSamples:           null.NewInt(25, false),
		AggregationOutlierAlgoThreshold: null.NewInt(75, false),
		AggregationOutlierIqrRadius:     null.NewFloat(0.25, false),
		AggregationOutlierIqrCoefLower:  null.NewFloat(1.5, false),
		AggregationOutlierIqrCoefUpper:  null.NewFloat(1.3, false),
	}
}
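
// exampleEnableAggregation is an illustrative sketch, not part of the original file:
// it layers user-supplied options on top of the defaults via Apply (defined below).
// The field names are real, but the values here are hypothetical. Setting a valid
// AggregationPeriod is what actually enables aggregation; the other aggregation
// options then fall back to the defaults from NewConfig above.
func exampleEnableAggregation() Config {
	conf := NewConfig()
	return conf.Apply(Config{
		Token:             null.NewString("hypothetical-token", true),
		AggregationPeriod: types.NewNullDuration(1*time.Second, true),
	})
}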

// Apply saves non-zero config values from the passed config in the receiver.
func (c Config) Apply(cfg Config) Config {
	if cfg.Token.Valid {
		c.Token = cfg.Token
	}
	if cfg.DeprecatedToken.Valid {
		c.DeprecatedToken = cfg.DeprecatedToken
	}
	if cfg.Name.Valid && cfg.Name.String != "" {
		c.Name = cfg.Name
	}
	if cfg.Host.Valid && cfg.Host.String != "" {
		c.Host = cfg.Host
	}
	if cfg.WebAppURL.Valid {
		c.WebAppURL = cfg.WebAppURL
	}
	if cfg.NoCompress.Valid {
		c.NoCompress = cfg.NoCompress
	}
	if cfg.ProjectID.Valid && cfg.ProjectID.Int64 > 0 {
		c.ProjectID = cfg.ProjectID
	}
	if cfg.MetricPushInterval.Valid {
		c.MetricPushInterval = cfg.MetricPushInterval
	}
	if cfg.MaxMetricSamplesPerPackage.Valid {
		c.MaxMetricSamplesPerPackage = cfg.MaxMetricSamplesPerPackage
	}
	if cfg.AggregationPeriod.Valid {
		c.AggregationPeriod = cfg.AggregationPeriod
	}
	if cfg.AggregationCalcInterval.Valid {
		c.AggregationCalcInterval = cfg.AggregationCalcInterval
	}
	if cfg.AggregationWaitPeriod.Valid {
		c.AggregationWaitPeriod = cfg.AggregationWaitPeriod
	}
	if cfg.AggregationMinSamples.Valid {
		c.AggregationMinSamples = cfg.AggregationMinSamples
	}
	if cfg.AggregationSkipOutlierDetection.Valid {
		c.AggregationSkipOutlierDetection = cfg.AggregationSkipOutlierDetection
	}
	if cfg.AggregationOutlierAlgoThreshold.Valid {
		c.AggregationOutlierAlgoThreshold = cfg.AggregationOutlierAlgoThreshold
	}
	if cfg.AggregationOutlierIqrRadius.Valid {
		c.AggregationOutlierIqrRadius = cfg.AggregationOutlierIqrRadius
	}
	if cfg.AggregationOutlierIqrCoefLower.Valid {
		c.AggregationOutlierIqrCoefLower = cfg.AggregationOutlierIqrCoefLower
	}
	if cfg.AggregationOutlierIqrCoefUpper.Valid {
		c.AggregationOutlierIqrCoefUpper = cfg.AggregationOutlierIqrCoefUpper
	}
	return c
}
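
// A usage note, not part of the original file: because Apply only copies fields that
// are Valid (and, for Name, Host and ProjectID, also non-zero), configs can be layered
// with the usual "last one wins" semantics for every field that is actually set:
//
//	conf := NewConfig().Apply(jsonConf).Apply(envConf)
//
// An unset (or, for those three fields, empty) value in a later layer won't clobber
// a value from an earlier one.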