Skip to content

Commit

Permalink
etcd_docker 4: Incorporate docker based etcd into Go integration tests
Browse files Browse the repository at this point in the history
PR 4 for #4144

High level approach is as described in #4144 .

This PR integrates docker based etcd into our Go integration tests. It removes the need to have the embed package running
in m3db for them, but doesn't yet touch that functionality.

commit-id:3ae12ffd
  • Loading branch information
andrewmains12 committed Aug 30, 2022
1 parent 0fbfcc1 commit c8b6a12
Show file tree
Hide file tree
Showing 11 changed files with 254 additions and 117 deletions.
11 changes: 11 additions & 0 deletions src/integration/aggregator/aggregator.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,17 @@ ingest:
maxBackoff: 10s
jitter: true
storeMetricsType: true
clusterManagement:
etcd:
env: default_env
zone: embedded
service: m3db
cacheDir: /var/lib/m3kv
etcdClusters:
- zone: embedded
endpoints:
- 127.0.0.1:2379
`

// TestAggregatorAggregatorConfig is the test config for the aggregators.
Expand Down
2 changes: 2 additions & 0 deletions src/integration/aggregator/aggregator_test.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
//go:build cluster_integration
// +build cluster_integration

//
// Copyright (c) 2021 Uber Technologies, Inc.
//
Expand Down
74 changes: 53 additions & 21 deletions src/integration/repair/repair_and_replication_test.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
//go:build cluster_integration
// +build cluster_integration

//
// Copyright (c) 2021 Uber Technologies, Inc.
//
Expand All @@ -23,28 +25,45 @@
package repair

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/m3db/m3/src/integration/resources"
"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
"github.com/m3db/m3/src/integration/resources/inprocess"
"github.com/m3db/m3/src/x/instrument"

"github.com/ory/dockertest/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestRepairAndReplication(t *testing.T) {
t.Skip("failing after etcd containerization; fix.")
cluster1, cluster2, closer := testSetup(t)
defer closer()

RunTest(t, cluster1, cluster2)
}

func testSetup(t *testing.T) (resources.M3Resources, resources.M3Resources, func()) {
fullCfgs1 := getClusterFullConfgs(t)
fullCfgs2 := getClusterFullConfgs(t)
pool, err := dockertest.NewPool("")
require.NoError(t, err)

ep1 := fullCfgs1.Configs.Coordinator.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints
ep2 := fullCfgs2.Configs.Coordinator.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints
etcd1 := mustNewStartedEtcd(t, pool)
etcd2 := mustNewStartedEtcd(t, pool)

ep1 := []string{etcd1.Address()}
ep2 := []string{etcd2.Address()}

cluster1Opts := newTestClusterOptions()
cluster1Opts.EtcdEndpoints = ep1

cluster2Opts := newTestClusterOptions()
cluster2Opts.EtcdEndpoints = ep2

fullCfgs1 := getClusterFullConfgs(t, cluster1Opts)
fullCfgs2 := getClusterFullConfgs(t, cluster2Opts)

setRepairAndReplicationCfg(
&fullCfgs1,
Expand All @@ -57,19 +76,28 @@ func testSetup(t *testing.T) (resources.M3Resources, resources.M3Resources, func
ep1,
)

cluster1, err := inprocess.NewClusterFromSpecification(fullCfgs1, clusterOptions)
cluster1, err := inprocess.NewClusterFromSpecification(fullCfgs1, cluster1Opts)
require.NoError(t, err)

cluster2, err := inprocess.NewClusterFromSpecification(fullCfgs2, clusterOptions)
cluster2, err := inprocess.NewClusterFromSpecification(fullCfgs2, cluster2Opts)
require.NoError(t, err)

return cluster1, cluster2, func() {
etcd1.Close(context.TODO())
etcd2.Close(context.TODO())
assert.NoError(t, cluster1.Cleanup())
assert.NoError(t, cluster2.Cleanup())
}
}

func getClusterFullConfgs(t *testing.T) inprocess.ClusterSpecification {
func mustNewStartedEtcd(t *testing.T, pool *dockertest.Pool) *dockerexternal.EtcdNode {
etcd, err := dockerexternal.NewEtcd(pool, instrument.NewOptions())
require.NoError(t, err)
require.NoError(t, etcd.Setup(context.TODO()))
return etcd
}

func getClusterFullConfgs(t *testing.T, clusterOptions resources.ClusterOptions) inprocess.ClusterSpecification {
cfgs, err := inprocess.NewClusterConfigsFromYAML(
TestRepairDBNodeConfig, TestRepairCoordinatorConfig, "",
)
Expand All @@ -84,18 +112,22 @@ func getClusterFullConfgs(t *testing.T) inprocess.ClusterSpecification {
func setRepairAndReplicationCfg(fullCfg *inprocess.ClusterSpecification, clusterName string, endpoints []string) {
for _, dbnode := range fullCfg.Configs.DBNodes {
dbnode.DB.Replication.Clusters[0].Name = clusterName
dbnode.DB.Replication.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints = endpoints
etcdService := &(dbnode.DB.Replication.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0])
etcdService.AutoSyncInterval = -1
etcdService.Endpoints = endpoints
}
}

var clusterOptions = resources.ClusterOptions{
DBNode: &resources.DBNodeClusterOptions{
RF: 2,
NumShards: 4,
NumInstances: 1,
NumIsolationGroups: 2,
},
Coordinator: resources.CoordinatorClusterOptions{
GeneratePorts: true,
},
func newTestClusterOptions() resources.ClusterOptions {
return resources.ClusterOptions{
DBNode: &resources.DBNodeClusterOptions{
RF: 2,
NumShards: 4,
NumInstances: 1,
NumIsolationGroups: 2,
},
Coordinator: resources.CoordinatorClusterOptions{
GeneratePorts: true,
},
}
}
50 changes: 25 additions & 25 deletions src/integration/resources/coordinator_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ var errUnknownServiceType = errors.New("unknown service type")
// operation until successful.
type RetryFunc func(op func() error) error

// ZapMethod appends the method as a log field.
func ZapMethod(s string) zapcore.Field { return zap.String("method", s) }
// zapMethod appends the method as a log field.
func zapMethod(s string) zapcore.Field { return zap.String("method", s) }

// CoordinatorClient is a client use to invoke API calls
// on a coordinator
Expand Down Expand Up @@ -97,7 +97,7 @@ func (c *CoordinatorClient) makeURL(resource string) string {
func (c *CoordinatorClient) GetNamespace() (admin.NamespaceGetResponse, error) {
url := c.makeURL("api/v1/services/m3db/namespace")
logger := c.logger.With(
ZapMethod("getNamespace"), zap.String("url", url))
zapMethod("getNamespace"), zap.String("url", url))

//nolint:noctx
resp, err := c.client.Get(url)
Expand Down Expand Up @@ -129,7 +129,7 @@ func (c *CoordinatorClient) GetPlacement(opts PlacementRequestOptions) (admin.Pl
}
url := c.makeURL(handlerurl)
logger := c.logger.With(
ZapMethod("getPlacement"), zap.String("url", url))
zapMethod("getPlacement"), zap.String("url", url))

resp, err := c.makeRequest(logger, url, placementhandler.GetHTTPMethod, nil, placementOptsToMap(opts))
if err != nil {
Expand Down Expand Up @@ -163,7 +163,7 @@ func (c *CoordinatorClient) InitPlacement(
}
url := c.makeURL(handlerurl)
logger := c.logger.With(
ZapMethod("initPlacement"), zap.String("url", url))
zapMethod("initPlacement"), zap.String("url", url))

resp, err := c.makeRequest(logger, url, placementhandler.InitHTTPMethod, &initRequest, placementOptsToMap(opts))
if err != nil {
Expand Down Expand Up @@ -194,7 +194,7 @@ func (c *CoordinatorClient) DeleteAllPlacements(opts PlacementRequestOptions) er
}
url := c.makeURL(handlerurl)
logger := c.logger.With(
ZapMethod("deleteAllPlacements"), zap.String("url", url))
zapMethod("deleteAllPlacements"), zap.String("url", url))

resp, err := c.makeRequest(
logger, url, placementhandler.DeleteAllHTTPMethod, nil, placementOptsToMap(opts),
Expand All @@ -221,7 +221,7 @@ func (c *CoordinatorClient) DeleteAllPlacements(opts PlacementRequestOptions) er
// NB: if the name string is empty, this will instead
// check for a successful response.
func (c *CoordinatorClient) WaitForNamespace(name string) error {
logger := c.logger.With(ZapMethod("waitForNamespace"))
logger := c.logger.With(zapMethod("waitForNamespace"))
return c.retryFunc(func() error {
ns, err := c.GetNamespace()
if err != nil {
Expand Down Expand Up @@ -250,7 +250,7 @@ func (c *CoordinatorClient) WaitForNamespace(name string) error {
func (c *CoordinatorClient) WaitForInstances(
ids []string,
) error {
logger := c.logger.With(ZapMethod("waitForPlacement"))
logger := c.logger.With(zapMethod("waitForPlacement"))
return c.retryFunc(func() error {
placement, err := c.GetPlacement(PlacementRequestOptions{Service: ServiceTypeM3DB})
if err != nil {
Expand Down Expand Up @@ -282,7 +282,7 @@ func (c *CoordinatorClient) WaitForInstances(

// WaitForShardsReady waits until all shards gets ready.
func (c *CoordinatorClient) WaitForShardsReady() error {
logger := c.logger.With(ZapMethod("waitForShards"))
logger := c.logger.With(zapMethod("waitForShards"))
return c.retryFunc(func() error {
placement, err := c.GetPlacement(PlacementRequestOptions{Service: ServiceTypeM3DB})
if err != nil {
Expand All @@ -307,7 +307,7 @@ func (c *CoordinatorClient) WaitForShardsReady() error {
func (c *CoordinatorClient) WaitForClusterReady() error {
var (
url = c.makeURL("ready")
logger = c.logger.With(ZapMethod("waitForClusterReady"), zap.String("url", url))
logger = c.logger.With(zapMethod("waitForClusterReady"), zap.String("url", url))
)
return c.retryFunc(func() error {
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
Expand Down Expand Up @@ -350,7 +350,7 @@ func (c *CoordinatorClient) CreateDatabase(
) (admin.DatabaseCreateResponse, error) {
url := c.makeURL("api/v1/database/create")
logger := c.logger.With(
ZapMethod("createDatabase"), zap.String("url", url),
zapMethod("createDatabase"), zap.String("url", url),
zap.String("request", addRequest.String()))

resp, err := c.makeRequest(logger, url, http.MethodPost, &addRequest, nil)
Expand Down Expand Up @@ -383,7 +383,7 @@ func (c *CoordinatorClient) AddNamespace(
) (admin.NamespaceGetResponse, error) {
url := c.makeURL("api/v1/services/m3db/namespace")
logger := c.logger.With(
ZapMethod("addNamespace"), zap.String("url", url),
zapMethod("addNamespace"), zap.String("url", url),
zap.String("request", addRequest.String()))

resp, err := c.makeRequest(logger, url, http.MethodPost, &addRequest, nil)
Expand Down Expand Up @@ -411,7 +411,7 @@ func (c *CoordinatorClient) UpdateNamespace(
) (admin.NamespaceGetResponse, error) {
url := c.makeURL("api/v1/services/m3db/namespace")
logger := c.logger.With(
ZapMethod("updateNamespace"), zap.String("url", url),
zapMethod("updateNamespace"), zap.String("url", url),
zap.String("request", req.String()))

resp, err := c.makeRequest(logger, url, http.MethodPut, &req, nil)
Expand All @@ -431,7 +431,7 @@ func (c *CoordinatorClient) UpdateNamespace(
func (c *CoordinatorClient) setNamespaceReady(name string) error {
url := c.makeURL("api/v1/services/m3db/namespace/ready")
logger := c.logger.With(
ZapMethod("setNamespaceReady"), zap.String("url", url),
zapMethod("setNamespaceReady"), zap.String("url", url),
zap.String("namespace", name))

_, err := c.makeRequest(logger, url, http.MethodPost, // nolint: bodyclose
Expand All @@ -445,7 +445,7 @@ func (c *CoordinatorClient) setNamespaceReady(name string) error {
// DeleteNamespace removes the namespace.
func (c *CoordinatorClient) DeleteNamespace(namespaceID string) error {
url := c.makeURL("api/v1/services/m3db/namespace/" + namespaceID)
logger := c.logger.With(ZapMethod("deleteNamespace"), zap.String("url", url))
logger := c.logger.With(zapMethod("deleteNamespace"), zap.String("url", url))

if _, err := c.makeRequest(logger, url, http.MethodDelete, nil, nil); err != nil { // nolint: bodyclose
logger.Error("failed to delete namespace", zap.Error(err))
Expand All @@ -462,7 +462,7 @@ func (c *CoordinatorClient) InitM3msgTopic(
) (admin.TopicGetResponse, error) {
url := c.makeURL(topic.InitURL)
logger := c.logger.With(
ZapMethod("initM3msgTopic"),
zapMethod("initM3msgTopic"),
zap.String("url", url),
zap.String("request", initRequest.String()),
zap.String("topic", fmt.Sprintf("%v", topicOpts)))
Expand All @@ -489,7 +489,7 @@ func (c *CoordinatorClient) GetM3msgTopic(
) (admin.TopicGetResponse, error) {
url := c.makeURL(topic.GetURL)
logger := c.logger.With(
ZapMethod("getM3msgTopic"), zap.String("url", url),
zapMethod("getM3msgTopic"), zap.String("url", url),
zap.String("topic", fmt.Sprintf("%v", topicOpts)))

resp, err := c.makeRequest(logger, url, topic.GetHTTPMethod, nil, m3msgTopicOptionsToMap(topicOpts))
Expand All @@ -516,7 +516,7 @@ func (c *CoordinatorClient) AddM3msgTopicConsumer(
) (admin.TopicGetResponse, error) {
url := c.makeURL(topic.AddURL)
logger := c.logger.With(
ZapMethod("addM3msgTopicConsumer"),
zapMethod("addM3msgTopicConsumer"),
zap.String("url", url),
zap.String("request", addRequest.String()),
zap.String("topic", fmt.Sprintf("%v", topicOpts)))
Expand Down Expand Up @@ -557,7 +557,7 @@ func (c *CoordinatorClient) WriteCarbon(
url string, metric string, v float64, t time.Time,
) error {
logger := c.logger.With(
ZapMethod("writeCarbon"), zap.String("url", url),
zapMethod("writeCarbon"), zap.String("url", url),
zap.String("at time", time.Now().String()),
zap.String("at ts", t.String()))

Expand Down Expand Up @@ -623,7 +623,7 @@ func (c *CoordinatorClient) WritePromWithRequest(writeRequest prompb.WriteReques
url := c.makeURL("api/v1/prom/remote/write")

logger := c.logger.With(
ZapMethod("writeProm"), zap.String("url", url),
zapMethod("writeProm"), zap.String("url", url),
zap.String("request", writeRequest.String()))

body, err := proto.Marshal(&writeRequest)
Expand Down Expand Up @@ -697,7 +697,7 @@ func (c *CoordinatorClient) ApplyKVUpdate(update string) error {
url := c.makeURL("api/v1/kvstore")

logger := c.logger.With(
ZapMethod("ApplyKVUpdate"), zap.String("url", url),
zapMethod("ApplyKVUpdate"), zap.String("url", url),
zap.String("update", update))

data := bytes.NewBuffer([]byte(update))
Expand Down Expand Up @@ -731,7 +731,7 @@ func (c *CoordinatorClient) query(
) error {
url := c.makeURL(query)
logger := c.logger.With(
ZapMethod("query"), zap.String("url", url), zap.Any("headers", headers))
zapMethod("query"), zap.String("url", url), zap.Any("headers", headers))
logger.Info("running")
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
Expand Down Expand Up @@ -962,7 +962,7 @@ func (c *CoordinatorClient) runQuery(
) (string, error) {
url := c.makeURL(query)
logger := c.logger.With(
ZapMethod("query"), zap.String("url", url), zap.Any("headers", headers))
zapMethod("query"), zap.String("url", url), zap.Any("headers", headers))
logger.Info("running")
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
Expand Down Expand Up @@ -1000,7 +1000,7 @@ func (c *CoordinatorClient) runQuery(
func (c *CoordinatorClient) RunQuery(
verifier ResponseVerifier, query string, headers map[string][]string,
) error {
logger := c.logger.With(ZapMethod("runQuery"),
logger := c.logger.With(zapMethod("runQuery"),
zap.String("query", query))
err := c.retryFunc(func() error {
err := c.query(verifier, query, headers)
Expand Down Expand Up @@ -1067,7 +1067,7 @@ func (c *CoordinatorClient) GraphiteQuery(

url := c.makeURL(queryStr)
logger := c.logger.With(
ZapMethod("graphiteQuery"), zap.String("url", url))
zapMethod("graphiteQuery"), zap.String("url", url))
logger.Info("running")
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
Expand Down
2 changes: 0 additions & 2 deletions src/integration/resources/docker/dockerexternal/etcd.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,6 @@ func (c *EtcdNode) Setup(ctx context.Context) (closeErr error) {
// This is coming from the equivalent of docker inspect <container_id>
portBinds := container.NetworkSettings.Ports["2379/tcp"]

// If running in a docker container e.g. on buildkite, route to etcd using the published port on the *host* machine.
// See also http://github.com/m3db/m3/blob/master/docker-compose.yml#L16-L16
ipAddr := "127.0.0.1"
_, err = net.ResolveIPAddr("ip4", "host.docker.internal")
if err == nil {
Expand Down
Loading

0 comments on commit c8b6a12

Please sign in to comment.