From 7b42fce4b7595f4126938fda9d67da20a66d72bc Mon Sep 17 00:00:00 2001
From: Ashvitha Sridharan
Date: Tue, 25 Jul 2023 13:26:20 -0400
Subject: [PATCH] Modify telemetry_config_providers

---
 agent/hcp/telemetry_config_provider.go      | 185 +++++-----
 agent/hcp/telemetry_config_provider_test.go | 384 +++++++++-----------
 2 files changed, 253 insertions(+), 316 deletions(-)

diff --git a/agent/hcp/telemetry_config_provider.go b/agent/hcp/telemetry_config_provider.go
index a79437b6700dd..d98cfd951a9f4 100644
--- a/agent/hcp/telemetry_config_provider.go
+++ b/agent/hcp/telemetry_config_provider.go
@@ -2,6 +2,7 @@ package hcp
 
 import (
 	"context"
+	"fmt"
 	"net/url"
 	"regexp"
 	"sync"
@@ -9,14 +10,9 @@ import (
 
 	goMetrics "github.com/armon/go-metrics"
 	"github.com/hashicorp/go-hclog"
+	"github.com/mitchellh/hashstructure/v2"
 
 	hcpclient "github.com/hashicorp/consul/agent/hcp/client"
-	"github.com/hashicorp/consul/agent/hcp/config"
-)
-
-const (
-	// defaultRefreshInterval is a default duration, after each
-	defaultRefreshInterval = 5 * time.Minute
 )
 
 var (
@@ -26,73 +22,88 @@ var (
 
 // TelemetryConfigProviderOpts is used to initialize a telemetryConfigProvider.
 type TelemetryConfigProviderOpts struct {
-	ctx             context.Context
-	endpoint        *url.URL
-	labels          map[string]string
-	filters         *regexp.Regexp
-	refreshInterval time.Duration
-	cloudCfg        config.CloudConfig
-	hcpClient       hcpclient.Client
+	Ctx             context.Context
+	MetricsConfig   *hcpclient.MetricsConfig
+	RefreshInterval time.Duration
+	HCPClient       hcpclient.Client
 }
 
-// metricsConfig is a set of configurable settings for metrics collection, processing and export.
-type metricsConfig struct {
-	endpoint *url.URL
-	labels   map[string]string
-	filters  *regexp.Regexp
+// dynamicConfig is a set of configurable settings for metrics collection, processing and export.
+// Fields MUST be exported so that hashstructure includes them when computing the config hash.
+type dynamicConfig struct {
+	Endpoint *url.URL
+	Labels   map[string]string
+	Filters  *regexp.Regexp
+	// RefreshInterval controls the interval at which new configuration is fetched from HCP.
+	RefreshInterval time.Duration
 }
 
-// telemetryConfigProvider holds metrics configuration and settings for its continuous
-// fetch of new config from HCP.
+// telemetryConfigProvider holds metrics configuration and settings for continuous fetch of new config from HCP.
 type telemetryConfigProvider struct {
-	// metricsConfig holds metrics configuration that can be dynamically updated
+	// cfg holds the telemetry configuration that can be dynamically updated
 	// based on updates fetched from HCP.
-	metricsConfig *metricsConfig
-
-	// refreshInterval controls the interval at which new configuration is fetched from HCP.
-	refreshInterval time.Duration
+	cfg *dynamicConfig
+	// cfgHash is the hash of cfg, used to detect whether a newly fetched dynamicConfig differs from the current one.
+	cfgHash uint64
 
 	// a reader-writer mutex is used as the provider is read heavy, as the OTEL components
-	// access metricsConfig, while config is only updated (write) when there are changes.
-	rw sync.RWMutex
-
+	// access cfg, while cfg is only updated (write) when changes are detected.
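+	// Updates swap the cfg pointer under the write lock rather than mutating fields in place,
+	// so readers always observe a complete dynamicConfig.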
+	rw        sync.RWMutex
 	logger    hclog.Logger
-	cloudCfg  config.CloudConfig
 	hcpClient hcpclient.Client
 }
 
-func NewTelemetryConfigProvider(opts *TelemetryConfigProviderOpts) *telemetryConfigProvider {
-	m := &metricsConfig{
-		endpoint: opts.endpoint,
-		labels:   opts.labels,
-		filters:  opts.filters,
+func NewTelemetryConfigProvider(opts *TelemetryConfigProviderOpts) (*telemetryConfigProvider, error) {
+	if opts.Ctx == nil {
+		return nil, fmt.Errorf("missing ctx")
+	}
+
+	if opts.HCPClient == nil {
+		return nil, fmt.Errorf("missing HCP client")
+	}
+
+	if opts.MetricsConfig == nil {
+		return nil, fmt.Errorf("missing metrics config")
+	}
+
+	if opts.RefreshInterval <= 0 {
+		return nil, fmt.Errorf("invalid refresh interval")
+	}
+
+	cfg := &dynamicConfig{
+		Endpoint:        opts.MetricsConfig.Endpoint,
+		Labels:          opts.MetricsConfig.Labels,
+		Filters:         opts.MetricsConfig.Filters,
+		RefreshInterval: opts.RefreshInterval,
+	}
+
+	hash, err := calculateHash(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to calculate hash: %w", err)
 	}
 
 	t := &telemetryConfigProvider{
-		metricsConfig:   m,
-		logger:          hclog.FromContext(opts.ctx).Named("telemetry_config_provider"),
-		refreshInterval: opts.refreshInterval,
-		cloudCfg:        opts.cloudCfg,
-		hcpClient:       opts.hcpClient,
+		cfg:       cfg,
+		cfgHash:   hash,
+		logger:    hclog.FromContext(opts.Ctx).Named("telemetry_config_provider"),
+		hcpClient: opts.HCPClient,
 	}
 
-	go t.run(opts.ctx)
+	go t.run(opts.Ctx, opts.RefreshInterval)
 
-	return t
+	return t, nil
 }
 
-// run continously checks for updates to the telemetry configuration by
-// making a request to HCP, and verifying for any updated fields.
-func (t *telemetryConfigProvider) run(ctx context.Context) {
-	ticker := time.NewTicker(t.refreshInterval)
+// run continuously checks for updates to the telemetry configuration by making a request to HCP.
+// The config is only modified when changes are detected, to limit write locks that would block reads.
+func (t *telemetryConfigProvider) run(ctx context.Context, refreshInterval time.Duration) {
+	ticker := time.NewTicker(refreshInterval)
 	defer ticker.Stop()
 	for {
 		select {
 		case <-ticker.C:
-			if m, hasChanged := t.checkUpdate(ctx); hasChanged {
-				// Only update metricsConfig changes are detected
-				// to decrease usage of write locks that block read locks.
-				t.modifyMetricsConfig(m)
+			if newCfg, hasChanged := t.checkUpdate(ctx); hasChanged {
+				t.modifyTelemetryConfig(newCfg)
+				ticker.Reset(newCfg.RefreshInterval)
 			}
 		case <-ctx.Done():
 			return
@@ -102,80 +113,43 @@ func (t *telemetryConfigProvider) run(ctx context.Context) {
 
-// checkUpdate makes a HTTP request to HCP to return a new metrics configuration and true, if config changed.
-// checkUpdate does not update the metricsConfig field to prevent acquiring the write lock unnecessarily.
-func (t *telemetryConfigProvider) checkUpdate(ctx context.Context) (*metricsConfig, bool) {
-	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
+// checkUpdate makes an HTTP request to HCP and returns a new dynamic configuration along with true if the config has changed.
+// checkUpdate does not modify the cfg field itself, to avoid acquiring the write lock unnecessarily.
+func (t *telemetryConfigProvider) checkUpdate(ctx context.Context) (*dynamicConfig, bool) {
+	t.rw.RLock()
+	defer t.rw.RUnlock()
+
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel()
 
 	telemetryCfg, err := t.hcpClient.FetchTelemetryConfig(ctx)
 	if err != nil {
-		t.logger.Error("failed to fetch telemetry config from HCP")
+		t.logger.Error("failed to fetch telemetry config from HCP", "error", err)
 		goMetrics.IncrCounter(internalMetricRefreshFailure, 1)
 		return nil, false
 	}
 
-	t.rw.RLock()
-	defer t.rw.RUnlock()
-
-	// TODO: Do we want a enabled config field?
-	endpoint, _ := telemetryCfg.Enabled()
-	endpointURL, err := url.Parse(endpoint)
-	if err != nil {
-		t.logger.Error("failed to update config: invalid endpoint URL")
-		goMetrics.IncrCounter(internalMetricRefreshFailure, 1)
-		return nil, false
+	newDynamicConfig := &dynamicConfig{
+		Filters:         telemetryCfg.MetricsConfig.Filters,
+		Endpoint:        telemetryCfg.MetricsConfig.Endpoint,
+		Labels:          telemetryCfg.MetricsConfig.Labels,
+		RefreshInterval: telemetryCfg.RefreshConfig.RefreshInterval,
 	}
 
-	filters, err := telemetryCfg.FilterRegex()
+	newHash, err := calculateHash(newDynamicConfig)
 	if err != nil {
-		t.logger.Error("failed to update config: invalid filters")
+		t.logger.Error("failed to calculate hash for new config", "error", err)
 		goMetrics.IncrCounter(internalMetricRefreshFailure, 1)
 		return nil, false
 	}
 
-	labels := telemetryCfg.DefaultLabels(t.cloudCfg)
-
-	newMetricsConfig := &metricsConfig{
-		filters:  filters,
-		endpoint: endpointURL,
-		labels:   labels,
-	}
-
-	newEndpoint := endpoint != t.metricsConfig.endpoint.String()
-	newFilters := filters.String() != t.metricsConfig.filters.String()
-	newLabels := labelsChanged(t.metricsConfig.labels, labels)
-
-	hasChanged := newEndpoint || newFilters || newLabels
-
-	// TODO: Add refresh interval once added to the protos on the TGW side.
-	return newMetricsConfig, hasChanged
-}
-
-// labelsChanged returns true if newLabels is different from oldLabels.
-func labelsChanged(newLabels map[string]string, oldLabels map[string]string) bool {
-	// if length is different, then labels have changed, so return true.
-	if len(newLabels) != len(oldLabels) {
-		return true
-	}
-
-	// If length is the same, we must verify k,v pairs have not changed.
-	// If a new key is not in the old labels, return true.
-	// If a value has changed, return true.
-	for newKey, newValue := range newLabels {
-		if oldValue, ok := oldLabels[newKey]; !ok || newValue != oldValue {
-			return true
-		}
-	}
-
-	// labels have not changed.
-	return false
+	return newDynamicConfig, newHash != t.cfgHash
 }
 
-// modifyMetricsConfig acquires a write lock to modify it with a given metricsConfig object.
-func (t *telemetryConfigProvider) modifyMetricsConfig(m *metricsConfig) {
+// modifyTelemetryConfig acquires a write lock to replace the stored dynamic config and its hash
+// with the given dynamicConfig object.
+func (t *telemetryConfigProvider) modifyTelemetryConfig(newCfg *dynamicConfig) {
+	newHash, err := calculateHash(newCfg)
+	if err != nil {
+		t.logger.Error("failed to calculate hash for new config", "error", err)
+		goMetrics.IncrCounter(internalMetricRefreshFailure, 1)
+		return
+	}
+
 	t.rw.Lock()
 	defer t.rw.Unlock()
 
-	t.metricsConfig = m
+	t.cfg = newCfg
+	t.cfgHash = newHash
 }
 
 // GetEndpoint acquires a read lock to return endpoint configuration for consumers.
@@ -183,7 +157,7 @@ func (t *telemetryConfigProvider) GetEndpoint() *url.URL {
 	t.rw.RLock()
 	defer t.rw.RUnlock()
 
-	return t.metricsConfig.endpoint
+	return t.cfg.Endpoint
 }
 
 // GetFilters acquires a read lock to return filters configuration for consumers.
@@ -191,7 +165,7 @@ func (t *telemetryConfigProvider) GetFilters() *regexp.Regexp {
 	t.rw.RLock()
 	defer t.rw.RUnlock()
 
-	return t.metricsConfig.filters
+	return t.cfg.Filters
 }
 
 // GetLabels acquires a read lock to return labels configuration for consumers.
@@ -199,5 +173,10 @@ func (t *telemetryConfigProvider) GetLabels() map[string]string {
 	t.rw.RLock()
 	defer t.rw.RUnlock()
 
-	return t.metricsConfig.labels
+	return t.cfg.Labels
+}
+
+// calculateHash returns a uint64 hash for data that can be used for comparisons.
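+// The hash is computed with hashstructure's FormatV2 over the exported fields of dynamicConfig
+// and is compared against the stored cfgHash in checkUpdate to detect configuration changes.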
+func calculateHash(cfg *dynamicConfig) (uint64, error) { + return hashstructure.Hash(*cfg, hashstructure.FormatV2, nil) } diff --git a/agent/hcp/telemetry_config_provider_test.go b/agent/hcp/telemetry_config_provider_test.go index 6be83811e2f59..ce4d7004e64b4 100644 --- a/agent/hcp/telemetry_config_provider_test.go +++ b/agent/hcp/telemetry_config_provider_test.go @@ -10,284 +10,242 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/consul/agent/hcp/client" - "github.com/hashicorp/consul/agent/hcp/config" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + hcpclient "github.com/hashicorp/consul/agent/hcp/client" ) -func TestTelemetryConfigProvider_Sucess(t *testing.T) { - for name, test := range map[string]struct { - filters string - endpoint string - labels map[string]string - telemetryCfg *client.TelemetryConfig +const defaultTestRefreshInterval = 100 * time.Millisecond + +type testConfig struct { + filters string + endpoint string + labels map[string]string +} + +func TestNewTelemetryConfigProvider(t *testing.T) { + t.Parallel() + for name, tc := range map[string]struct { + opts *TelemetryConfigProviderOpts + wantErr string }{ - "SuccessNoChanges": { - filters: "test", - labels: map[string]string{ - "test_label": "123", + "success": { + opts: &TelemetryConfigProviderOpts{ + Ctx: context.Background(), + HCPClient: hcpclient.NewMockClient(t), + MetricsConfig: &hcpclient.MetricsConfig{}, + RefreshInterval: 1 * time.Second, }, - endpoint: "http://test.com/v1/metrics", - telemetryCfg: &client.TelemetryConfig{ - Endpoint: "http://test.com/v1/metrics", - Labels: map[string]string{ - "test_label": "123", - }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"test"}, - }, + }, + "failsWithMissingContext": { + opts: &TelemetryConfigProviderOpts{ + HCPClient: hcpclient.NewMockClient(t), + MetricsConfig: &hcpclient.MetricsConfig{}, }, + wantErr: "missing ctx", }, - "successNewLabels": { - filters: "test", - labels: map[string]string{ - "test_label": "123", + "failsWithMissingHCPClient": { + opts: &TelemetryConfigProviderOpts{ + Ctx: context.Background(), + MetricsConfig: &hcpclient.MetricsConfig{}, }, - endpoint: "http://test.com/v1/metrics", - telemetryCfg: &client.TelemetryConfig{ - Endpoint: "http://test.com/v1/metrics", - Labels: map[string]string{ - "new_label": "1234", - }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"test"}, - }, + wantErr: "missing HCP client", + }, + "failsWithMissingMetricsConfig": { + opts: &TelemetryConfigProviderOpts{ + Ctx: context.Background(), + HCPClient: hcpclient.NewMockClient(t), }, + wantErr: "missing metrics config", }, - "successNewEndpoint": { - filters: "test", - labels: map[string]string{ - "test_label": "123", + "failsWithInvalidRefreshInterval": { + opts: &TelemetryConfigProviderOpts{ + Ctx: context.Background(), + HCPClient: hcpclient.NewMockClient(t), + MetricsConfig: &hcpclient.MetricsConfig{}, + RefreshInterval: 0 * time.Second, }, - endpoint: "http://test.com/v1/metrics", - telemetryCfg: &client.TelemetryConfig{ - Endpoint: "http://newendpoint.com", - Labels: map[string]string{ + wantErr: "invalid refresh interval", + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + cfgProvider, err := NewTelemetryConfigProvider(tc.opts) + if tc.wantErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErr) + require.Nil(t, cfgProvider) + return + } + require.NotNil(t, cfgProvider) + }) + } +} + +func 
TestTelemetryConfigProvider_Success(t *testing.T) { + for name, tc := range map[string]struct { + optsInputs *testConfig + expected *testConfig + }{ + "noChanges": { + optsInputs: &testConfig{ + endpoint: "http://test.com/v1/metrics", + filters: "test", + labels: map[string]string{ "test_label": "123", }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"test"}, + }, + expected: &testConfig{ + endpoint: "http://test.com/v1/metrics", + labels: map[string]string{ + "test_label": "123", }, + filters: "test", }, }, - "successNewFilters": { - filters: "test", - labels: map[string]string{ - "test_label": "123", + "newConfig": { + optsInputs: &testConfig{ + endpoint: "http://test.com/v1/metrics", + filters: "test", + labels: map[string]string{ + "test_label": "123", + }, }, - endpoint: "http://test.com/v1/metrics", - telemetryCfg: &client.TelemetryConfig{ - Endpoint: "http://test.com", - Labels: map[string]string{ + expected: &testConfig{ + endpoint: "http://newendpoint/v1/metrics", + filters: "consul", + labels: map[string]string{ "new_label": "1234", }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"consul"}, - }, }, }, } { t.Run(name, func(t *testing.T) { - filters, err := regexp.Compile(test.filters) + // Setup client mock to return the expected config. + mockClient := hcpclient.NewMockClient(t) + + mockCfg, err := telemetryConfig(tc.expected) require.NoError(t, err) - endpoint, err := url.Parse(test.endpoint) + mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) + + // Setup TelemetryConfigProvider with opts inputs. + optsCfg, err := telemetryConfig(tc.optsInputs) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cloudCfg := config.CloudConfig{ - NodeID: "test_node_id", - NodeName: "test_node_name", - } - - mockClient := client.NewMockClient(t) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(test.telemetryCfg, nil) - opts := &TelemetryConfigProviderOpts{ - ctx: ctx, - filters: filters, - endpoint: endpoint, - labels: test.labels, - cloudCfg: cloudCfg, - hcpClient: mockClient, - refreshInterval: 1 * time.Second, + MetricsConfig: optsCfg.MetricsConfig, + Ctx: ctx, + HCPClient: mockClient, + RefreshInterval: defaultTestRefreshInterval, } - configProvider := NewTelemetryConfigProvider(opts) - - // TODO: Can I use a test chan to avoid time.Sleep. 
- time.Sleep(2 * time.Second) - - expectedEndpoint, _ := test.telemetryCfg.Enabled() - expectedEndpointURL, err := url.Parse(expectedEndpoint) - require.NoError(t, err) - - expectedFilters := test.telemetryCfg.MetricsConfig.Filters - expectedFiltersRegex, err := regexp.Compile(strings.Join(expectedFilters, "|")) + configProvider, err := NewTelemetryConfigProvider(opts) require.NoError(t, err) - expectedLabels := test.telemetryCfg.DefaultLabels(cloudCfg) - - require.Equal(t, expectedEndpointURL, configProvider.GetEndpoint()) - require.Equal(t, expectedFiltersRegex.String(), configProvider.GetFilters().String()) - require.Equal(t, expectedLabels, configProvider.GetLabels()) + require.EventuallyWithTf(t, func(c *assert.CollectT) { + assert.Equal(c, tc.expected.endpoint, configProvider.GetEndpoint().String()) + assert.Equal(c, tc.expected.filters, configProvider.GetFilters().String()) + assert.Equal(c, tc.expected.labels, configProvider.GetLabels()) + }, 2*time.Second, defaultTestRefreshInterval, "failed to update telemetry config expected") }) } } func TestTelemetryConfigProvider_UpdateFailuresWithMetrics(t *testing.T) { - for name, test := range map[string]struct { - filters string - endpoint string - labels map[string]string - expect func(*client.MockClient) + for name, tc := range map[string]struct { + expected *testConfig + expect func(*hcpclient.MockClient) }{ - "failsWithInvalidFilters": { - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - endpoint: "http://test.com/v1/metrics", - expect: func(m *client.MockClient) { - t := &client.TelemetryConfig{ - Endpoint: "http://test.com", - Labels: map[string]string{ - "new_label": "1234", - }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"["}, - }, - } - m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(t, nil) - }, - }, - "failsWithInvalidURL": { - filters: "test", - labels: map[string]string{ - "test_label": "123", - }, - endpoint: "http://test.com/v1/metrics", - expect: func(m *client.MockClient) { - t := &client.TelemetryConfig{ - Endpoint: " ", - Labels: map[string]string{ - "new_label": "1234", - }, - MetricsConfig: &client.MetricsConfig{ - Filters: []string{"test"}, - }, - } - m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(t, nil) - }, - }, "failsWithHCPClientFailure": { - filters: "test", - labels: map[string]string{ - "test_label": "123", + expected: &testConfig{ + filters: "test", + labels: map[string]string{ + "test_label": "123", + }, + endpoint: "http://test.com/v1/metrics", }, - endpoint: "http://test.com/v1/metrics", - expect: func(m *client.MockClient) { + expect: func(m *hcpclient.MockClient) { m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, fmt.Errorf("failure")) }, }, } { t.Run(name, func(t *testing.T) { - filters, err := regexp.Compile(test.filters) - require.NoError(t, err) + // Init global metrics sink. 
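+			// The provider reports internalMetricRefreshFailure through the global go-metrics
+			// instance, so the test installs an in-memory sink to observe that counter.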
+			serviceName := "test.telemetry_config_provider"
+			cfg := metrics.DefaultConfig(serviceName)
+			cfg.EnableHostname = false
 
-			endpoint, err := url.Parse(test.endpoint)
+			sink := metrics.NewInmemSink(10*time.Second, 10*time.Second)
+			metrics.NewGlobal(cfg, sink)
+
+			telemetryConfig, err := telemetryConfig(tc.expected)
 			require.NoError(t, err)
 
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
-			cloudCfg := config.CloudConfig{
-				NodeID:   "test_node_id",
-				NodeName: "test_node_name",
-			}
-
-			mockClient := client.NewMockClient(t)
-			test.expect(mockClient)
+			mockClient := hcpclient.NewMockClient(t)
+			tc.expect(mockClient)
 
 			opts := &TelemetryConfigProviderOpts{
-				ctx:             ctx,
-				filters:         filters,
-				endpoint:        endpoint,
-				labels:          test.labels,
-				cloudCfg:        cloudCfg,
-				hcpClient:       mockClient,
-				refreshInterval: 1 * time.Second,
+				Ctx:             ctx,
+				MetricsConfig:   telemetryConfig.MetricsConfig,
+				HCPClient:       mockClient,
+				RefreshInterval: defaultTestRefreshInterval,
 			}
 
-			// Init global sink.
-			serviceName := "test.telemetry_config_provider"
-			cfg := metrics.DefaultConfig(serviceName)
-			cfg.EnableHostname = false
-
-			sink := metrics.NewInmemSink(10*time.Second, 10*time.Second)
-			metrics.NewGlobal(cfg, sink)
-
-			configProvider := NewTelemetryConfigProvider(opts)
-
-			time.Sleep(2 * time.Second)
-
-			require.Equal(t, endpoint, configProvider.GetEndpoint())
-			require.Equal(t, filters.String(), configProvider.GetFilters().String())
-			require.Equal(t, test.labels, configProvider.GetLabels())
+			configProvider, err := NewTelemetryConfigProvider(opts)
+			require.NoError(t, err)
 
-			// Collect sink metrics.
-			intervals := sink.Data()
-			require.Len(t, intervals, 1)
-			key := serviceName + "." + strings.Join(internalMetricRefreshFailure, ".")
-			sv := intervals[0].Counters[key]
+			// EventuallyWithTf retries the assertions every refresh tick (100 ms) until the
+			// failure metric is recorded and the dynamic config is verified to be unchanged.
+			require.EventuallyWithTf(t, func(c *assert.CollectT) {
+				// Collect sink metrics.
+				key := serviceName + "." + strings.Join(internalMetricRefreshFailure, ".")
+				intervals := sink.Data()
+				sv := intervals[0].Counters[key]
+
+				// Verify the count for the refresh failure metric.
-			require.NotNil(t, sv)
-			require.NotNil(t, sv.AggregateSample)
-			require.GreaterOrEqual(t, 1, sv.AggregateSample.Count)
+				assert.NotNil(c, sv.AggregateSample)
+				// Guard against nil: on early ticks the AggregateSample may not be populated yet.
+				if sv.AggregateSample != nil {
+					assert.GreaterOrEqual(c, sv.AggregateSample.Count, 1)
+				}
-
+				// Upon failures, config should not have changed.
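+				// The getters should still serve the values supplied through TelemetryConfigProviderOpts,
+				// because modifyTelemetryConfig is only called after a successful fetch.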
+ assert.Equal(c, tc.expected.endpoint, configProvider.GetEndpoint().String()) + assert.Equal(c, tc.expected.filters, configProvider.GetFilters().String()) + assert.Equal(c, tc.expected.labels, configProvider.GetLabels()) + }, 2*time.Second, defaultTestRefreshInterval, "failed to get expected failure metrics") }) } } -func TestLabelsChanged(t *testing.T) { - for name, test := range map[string]struct { - newLabels map[string]string - oldLabels map[string]string - expectedChange bool - }{ - "noChange": { - newLabels: map[string]string{"key1": "test1"}, - oldLabels: map[string]string{"key1": "test1"}, - expectedChange: false, - }, - "newLabelsNewKey": { - newLabels: map[string]string{"key2": "test1"}, - oldLabels: map[string]string{"key1": "test1"}, - expectedChange: true, - }, - "newLabelsSameKey": { - newLabels: map[string]string{"key1": "test2"}, - oldLabels: map[string]string{"key1": "test1"}, - expectedChange: true, - }, - "newLabelsLonger": { - newLabels: map[string]string{"key1": "test1", "key2": "test2", "key3": "test3"}, - oldLabels: map[string]string{"key1": "test1"}, - expectedChange: true, +func telemetryConfig(testCfg *testConfig) (*hcpclient.TelemetryConfig, error) { + filters, err := regexp.Compile(testCfg.filters) + if err != nil { + return nil, err + } + + endpoint, err := url.Parse(testCfg.endpoint) + if err != nil { + return nil, err + } + return &hcpclient.TelemetryConfig{ + MetricsConfig: &hcpclient.MetricsConfig{ + Endpoint: endpoint, + Filters: filters, + Labels: testCfg.labels, }, - "newLabelsShorter": { - newLabels: map[string]string{"key1": "test1", "key2": "test2"}, - oldLabels: map[string]string{"key1": "test1", "key2": "test2", "key3": "test3"}, - expectedChange: true, + RefreshConfig: &hcpclient.RefreshConfig{ + RefreshInterval: defaultTestRefreshInterval, }, - } { - t.Run(name, func(t *testing.T) { - require.Equal(t, test.expectedChange, labelsChanged(test.newLabels, test.oldLabels)) - }) - } + }, nil } - -// TODO: Add race test
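A minimal, standalone sketch (not part of the patch) of the hashstructure-based change detection the provider relies on. The exampleConfig type and its values are hypothetical stand-ins for dynamicConfig; only the mitchellh/hashstructure/v2 API already imported above is assumed, and only exported fields contribute to the hash.

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"
)

// exampleConfig mirrors the shape of dynamicConfig: fields are exported so that
// hashstructure includes them when computing the hash.
type exampleConfig struct {
	Endpoint string
	Labels   map[string]string
}

func main() {
	a := exampleConfig{Endpoint: "https://test.com/v1/metrics", Labels: map[string]string{"node": "a"}}
	b := exampleConfig{Endpoint: "https://test.com/v1/metrics", Labels: map[string]string{"node": "a"}}
	c := exampleConfig{Endpoint: "https://test.com/v1/metrics", Labels: map[string]string{"node": "b"}}

	ha, _ := hashstructure.Hash(a, hashstructure.FormatV2, nil)
	hb, _ := hashstructure.Hash(b, hashstructure.FormatV2, nil)
	hc, _ := hashstructure.Hash(c, hashstructure.FormatV2, nil)

	fmt.Println(ha == hb) // true: identical values hash to the same uint64
	fmt.Println(ha == hc) // false: a changed label produces a different hash
}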