diff --git a/README.md b/README.md index 126716f73b..f0474ad2c6 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,8 @@ Sumo Logic Helm Chart Version | version | status | |-----------------------------------------------------------------------------------------------------------|-----------------------------------------| -| [v2.16](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.15/deploy/README.md) | current / supported | +| [v2.17](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.17/deploy/README.md) | current / supported | +| [v2.16](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.16/deploy/README.md) | deprecated / supported until 2023-03-15 | | [v2.15](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.15/deploy/README.md) | deprecated / supported until 2023-03-13 | | [v2.14](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.14/deploy/README.md) | deprecated / supported until 2023-03-02 | | [v2.13](https://github.com/SumoLogic/sumologic-kubernetes-collection/tree/release-v2.13/deploy/README.md) | deprecated / supported until 2023-01-29 | diff --git a/deploy/README.md b/deploy/README.md index 2ef1f5cfc0..187d5124dc 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -9,6 +9,7 @@ for details on our Kubernetes Solution. - [Minimum Requirements](#minimum-requirements) - [Support Matrix](#support-matrix) - [ARM support](#arm-support) + - [Falco support](#falco-support) Documentation for other versions can be found in the [main README file](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/main/README.md#documentation). @@ -133,3 +134,7 @@ The only exception to the above is Falco, which currently lacks official ARM Doc [falco]: https://github.com/falcosecurity/falco/issues/1589 [issues]: https://github.com/SumoLogic/sumologic-kubernetes-collection/issues + +### Falco support + +Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it. 
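Since Falco ships with the chart as a convenience only, it helps to show how to opt in or out of it explicitly. A minimal sketch, assuming the bundled subchart is wired to the usual `falco.enabled` condition and that the release and namespace names follow the standard installation docs (`collection` in `sumologic`):

```sh
# Opt in to the bundled Falco subchart for evaluation only; it is not covered by
# Sumo Logic production support. Omit the flag (or set it to false) to keep Falco off.
helm upgrade --install collection sumologic/sumologic \
  --namespace sumologic \
  --set falco.enabled=true
```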
diff --git a/deploy/docs/Best_Practices.md b/deploy/docs/Best_Practices.md
index 10ec5e865c..d8d0a01705 100644
--- a/deploy/docs/Best_Practices.md
+++ b/deploy/docs/Best_Practices.md
@@ -45,6 +45,7 @@
 - [Using NodeSelectors](#using-nodeselectors)
 - [Binding pods to linux nodes](#binding-pods-to-linux-nodes)
 - [Disable Thanos](#disable-thanos)
+- [Parsing log content as json](#parsing-log-content-as-json)
 
 ## Overriding chart resource names with `fullnameOverride`
 
@@ -1448,3 +1449,20 @@ kube-prometheus-stack:
     prometheusSpec:
       thanos: null
 ```
+
+## Parsing log content as json
+
+To parse and store log content as JSON, apply the following configuration:
+
+```yaml
+fluentd:
+  logs:
+    containers:
+      extraOutputPluginConf: |-
+        <filter containers.**>
+          @type record_modifier
+          <record>
+            _sumo_metadata ${record["_sumo_metadata"][:log_format] = 'json_merge'; record["_sumo_metadata"]}
+          </record>
+        </filter>
+```
diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml
index 9b455edf34..39c2219742 100644
--- a/deploy/helm/sumologic/values.yaml
+++ b/deploy/helm/sumologic/values.yaml
@@ -280,7 +280,7 @@ sumologic:
       replicaCount: 3
       image:
         repository: public.ecr.aws/sumologic/nginx-unprivileged
-        tag: 1.21-alpine
+        tag: 1.23-alpine
         pullPolicy: IfNotPresent
       resources:
         limits:
@@ -5024,7 +5024,8 @@ telegraf-operator:
       metric_version = 2
 
 # imagePullSecrets: []
-## Configure falco
+## Configure Falco
+## Please note that Falco is embedded in this Helm Chart for user convenience only - Sumo Logic does not provide production support for it.
 ## This is an experimental configuration and shouldn't be used in production environment
 ## https://github.com/falcosecurity/charts/tree/master/falco
 falco:
diff --git a/tests/helm/remote_write_proxy/static/basic.output.yaml b/tests/helm/remote_write_proxy/static/basic.output.yaml
index bc5aabb557..92c1a1265a 100644
--- a/tests/helm/remote_write_proxy/static/basic.output.yaml
+++ b/tests/helm/remote_write_proxy/static/basic.output.yaml
@@ -28,7 +28,7 @@ spec:
         {}
       containers:
         - name: nginx
-          image: public.ecr.aws/sumologic/nginx-unprivileged:1.21-alpine
+          image: public.ecr.aws/sumologic/nginx-unprivileged:1.23-alpine
           imagePullPolicy: IfNotPresent
           ports:
             - containerPort: 8080
diff --git a/tests/integration/go.mod b/tests/integration/go.mod
index aed06edd12..2bb3b2e3f5 100644
--- a/tests/integration/go.mod
+++ b/tests/integration/go.mod
@@ -5,8 +5,8 @@ go 1.18
 require (
 	github.com/gruntwork-io/terratest v0.40.22
 	github.com/stretchr/testify v1.8.0
-	k8s.io/api v0.25.1
-	k8s.io/apimachinery v0.25.1
+	k8s.io/api v0.25.2
+	k8s.io/apimachinery v0.25.2
 	k8s.io/klog/v2 v2.80.1
 	sigs.k8s.io/e2e-framework v0.0.7
 )
@@ -83,7 +83,7 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/client-go v0.25.1
+	k8s.io/client-go v0.25.2
 	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
 	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
 	sigs.k8s.io/controller-runtime v0.11.2 // indirect
diff --git a/tests/integration/go.sum b/tests/integration/go.sum
index 3d42402f59..bf58f6bc9a 100644
--- a/tests/integration/go.sum
+++ b/tests/integration/go.sum
@@ -1023,17 +1023,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ=
-k8s.io/api 
v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= -k8s.io/api v0.25.1/go.mod h1:hh4itDvrWSJsmeUc28rIFNri8MatNAAxJjKcQmhX6TU= +k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= +k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= -k8s.io/apimachinery v0.25.1/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= +k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= +k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0= k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= -k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= -k8s.io/client-go v0.25.1/go.mod h1:rdFWTLV/uj2C74zGbQzOsmXPUtMAjSf7ajil4iJUNKo= +k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= +k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= diff --git a/tests/integration/helm_otelcol_traces_test.go b/tests/integration/helm_otelcol_traces_test.go new file mode 100644 index 0000000000..9cff4d6dcf --- /dev/null +++ b/tests/integration/helm_otelcol_traces_test.go @@ -0,0 +1,220 @@ +package integration + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal" + "github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/ctxopts" + "github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/stepfuncs" + terrak8s "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +func Test_Helm_Otelcol_Traces(t *testing.T) { + const ( + tickDuration = 3 * time.Second + waitDuration = 3 * time.Minute + tracesPerExporter uint = 10 // number of traces generated per exporter + spansPerTrace uint = 5 + ) + featInstall := features.New("traces"). + Assess("sumologic secret is created with endpoints", + func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + terrak8s.WaitUntilSecretAvailable(t, ctxopts.KubectlOptions(ctx), "sumologic", 60, tickDuration) + secret := terrak8s.GetSecret(t, ctxopts.KubectlOptions(ctx), "sumologic") + require.Len(t, secret.Data, 2, "Secret has incorrect number of endpoints. There should be 2 endpoints.") + return ctx + }). 
+ // TODO: Rewrite into similar step func as WaitUntilStatefulSetIsReady but for deployments + Assess("otelcol deployment is ready", func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + res := envConf.Client().Resources(ctxopts.Namespace(ctx)) + releaseName := ctxopts.HelmRelease(ctx) + labelSelector := fmt.Sprintf("app=%s-sumologic-otelcol", releaseName) + ds := appsv1.DeploymentList{} + + require.NoError(t, + wait.For( + conditions.New(res). + ResourceListN(&ds, 1, + resources.WithLabelSelector(labelSelector), + ), + wait.WithTimeout(waitDuration), + wait.WithInterval(tickDuration), + ), + ) + require.NoError(t, + wait.For( + conditions.New(res). + DeploymentConditionMatch(&ds.Items[0], appsv1.DeploymentAvailable, corev1.ConditionTrue), + wait.WithTimeout(waitDuration), + wait.WithInterval(tickDuration), + ), + ) + return ctx + }). + // TODO: Rewrite into similar step func as WaitUntilStatefulSetIsReady but for daemonsets + Assess("otelagent daemonset is ready", func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + res := envConf.Client().Resources(ctxopts.Namespace(ctx)) + nl := corev1.NodeList{} + if !assert.NoError(t, res.List(ctx, &nl)) { + return ctx + } + + releaseName := ctxopts.HelmRelease(ctx) + labelSelector := fmt.Sprintf("app=%s-sumologic-otelagent", releaseName) + ds := appsv1.DaemonSetList{} + + require.NoError(t, + wait.For( + conditions.New(res). + ResourceListN(&ds, 1, + resources.WithLabelSelector(labelSelector), + ), + wait.WithTimeout(waitDuration), + wait.WithInterval(tickDuration), + ), + ) + require.NoError(t, + wait.For( + conditions.New(res). + ResourceMatch(&ds.Items[0], func(object k8s.Object) bool { + d := object.(*appsv1.DaemonSet) + return d.Status.NumberUnavailable == 0 && + d.Status.NumberReady == int32(len(nl.Items)) + }), + wait.WithTimeout(waitDuration), + wait.WithInterval(tickDuration), + ), + ) + return ctx + }).Feature() + + featTraces := features.New("traces"). + Setup(stepfuncs.GenerateTraces( + tracesPerExporter, + spansPerTrace, + internal.TracesGeneratorName, + internal.TracesGeneratorNamespace, + internal.TracesGeneratorImage, + )). + Assess("wait for otlp http traces", stepfuncs.WaitUntilExpectedTracesPresent( + tracesPerExporter, + spansPerTrace, + map[string]string{ + "__name__": "root-span-otlpHttp", + "service.name": "customer-trace-test-service", + "_collector": "kubernetes", + "k8s.cluster.name": "kubernetes", + "k8s.container.name": internal.TracesGeneratorName, + "k8s.deployment.name": internal.TracesGeneratorName, + "k8s.namespace.name": internal.TracesGeneratorNamespace, + "k8s.pod.pod_name": internal.TracesGeneratorName, + "k8s.pod.label.app": internal.TracesGeneratorName, + // "_sourceCategory": "kubernetes/customer/trace/tester/customer/trace/tester", + "_sourceName": fmt.Sprintf("%s.%s.%s", internal.TracesGeneratorNamespace, internal.TracesGeneratorName, internal.TracesGeneratorName), + }, + internal.ReceiverMockNamespace, + internal.ReceiverMockServiceName, + internal.ReceiverMockServicePort, + waitDuration, + tickDuration, + )). 
+ Assess("wait for otlp grpc traces", stepfuncs.WaitUntilExpectedTracesPresent( + tracesPerExporter, + spansPerTrace, + map[string]string{ + "__name__": "root-span-otlpGrpc", + "service.name": "customer-trace-test-service", + "_collector": "kubernetes", + "k8s.cluster.name": "kubernetes", + "k8s.container.name": internal.TracesGeneratorName, + "k8s.deployment.name": internal.TracesGeneratorName, + "k8s.namespace.name": internal.TracesGeneratorNamespace, + "k8s.pod.pod_name": internal.TracesGeneratorName, + "k8s.pod.label.app": internal.TracesGeneratorName, + // "_sourceCategory": "kubernetes/customer/trace/tester/customer/trace/tester", + "_sourceName": fmt.Sprintf("%s.%s.%s", internal.TracesGeneratorNamespace, internal.TracesGeneratorName, internal.TracesGeneratorName), + }, + internal.ReceiverMockNamespace, + internal.ReceiverMockServiceName, + internal.ReceiverMockServicePort, + waitDuration, + tickDuration, + )). + Assess("wait for zipkin traces", stepfuncs.WaitUntilExpectedTracesPresent( + tracesPerExporter, + spansPerTrace, + map[string]string{ + "__name__": "root-span-zipkin", + "service.name": "customer-trace-test-service", + "_collector": "kubernetes", + "k8s.cluster.name": "kubernetes", + "k8s.container.name": internal.TracesGeneratorName, + "k8s.deployment.name": internal.TracesGeneratorName, + "k8s.namespace.name": internal.TracesGeneratorNamespace, + "k8s.pod.pod_name": internal.TracesGeneratorName, + "k8s.pod.label.app": internal.TracesGeneratorName, + // "_sourceCategory": "kubernetes/customer/trace/tester/customer/trace/tester", + "_sourceName": fmt.Sprintf("%s.%s.%s", internal.TracesGeneratorNamespace, internal.TracesGeneratorName, internal.TracesGeneratorName), + }, + internal.ReceiverMockNamespace, + internal.ReceiverMockServiceName, + internal.ReceiverMockServicePort, + waitDuration, + tickDuration, + )). + Assess("wait for jaeger thrift http traces", stepfuncs.WaitUntilExpectedTracesPresent( + tracesPerExporter, + spansPerTrace, + map[string]string{ + "__name__": "root-span-jaegerThriftHttp", + "service.name": "customer-trace-test-service", + "_collector": "kubernetes", + "k8s.cluster.name": "kubernetes", + "k8s.container.name": internal.TracesGeneratorName, + "k8s.deployment.name": internal.TracesGeneratorName, + "k8s.namespace.name": internal.TracesGeneratorNamespace, + "k8s.pod.pod_name": internal.TracesGeneratorName, + "k8s.pod.label.app": internal.TracesGeneratorName, + // "_sourceCategory": "kubernetes/customer/trace/tester/customer/trace/tester", + "_sourceName": fmt.Sprintf("%s.%s.%s", internal.TracesGeneratorNamespace, internal.TracesGeneratorName, internal.TracesGeneratorName), + "otel.library.name": "jaegerThriftHttp", + }, + internal.ReceiverMockNamespace, + internal.ReceiverMockServiceName, + internal.ReceiverMockServicePort, + waitDuration, + tickDuration, + )). + Assess("wait for all spans", stepfuncs.WaitUntilExpectedSpansPresent( + 4*tracesPerExporter*spansPerTrace, // there are 4 exporters + map[string]string{}, + internal.ReceiverMockNamespace, + internal.ReceiverMockServiceName, + internal.ReceiverMockServicePort, + waitDuration, + tickDuration, + )). + Teardown(func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + opts := *ctxopts.KubectlOptions(ctx) + opts.Namespace = internal.TracesGeneratorNamespace + terrak8s.RunKubectl(t, &opts, "delete", "deployment", internal.TracesGeneratorName) + return ctx + }). + Teardown(stepfuncs.KubectlDeleteNamespaceOpt(internal.TracesGeneratorNamespace)). 
+ Feature() + + testenv.Test(t, featInstall, featTraces) +} diff --git a/tests/integration/internal/constants.go b/tests/integration/internal/constants.go index 62408e1ec2..ff27b28306 100644 --- a/tests/integration/internal/constants.go +++ b/tests/integration/internal/constants.go @@ -27,6 +27,10 @@ const ( LogsGeneratorName = "logs-generator" LogsGeneratorImage = "sumologic/kubernetes-tools:2.13.0" + TracesGeneratorNamespace = "customer-trace-tester" + TracesGeneratorName = "customer-trace-tester" + TracesGeneratorImage = "sumologic/kubernetes-tools:2.13.0" + MultilineLogsNamespace = "multiline-logs-generator" MultilineLogsPodName = "multiline-logs-generator" MultilineLogsGenerator = "yamls/multiline-logs-generator.yaml" diff --git a/tests/integration/internal/receivermock/receiver_mock.go b/tests/integration/internal/receivermock/receiver_mock.go index c0ab3bc049..d64be63353 100644 --- a/tests/integration/internal/receivermock/receiver_mock.go +++ b/tests/integration/internal/receivermock/receiver_mock.go @@ -19,6 +19,9 @@ import ( // Mapping of metric names to the number of times the metric was observed type MetricCounts map[string]int +type SpanId string +type TraceId string + // A HTTP client for the receiver-mock API type ReceiverMockClient struct { baseUrl url.URL @@ -161,6 +164,79 @@ func (client *ReceiverMockClient) GetLogsCount(t *testing.T, metadataFilters Met return response.Count, nil } +type Span struct { + Name string `json:"name,omitempty"` + Id SpanId `json:"id,omitempty"` + TraceId TraceId `json:"trace_id,omitempty"` + ParentSpanId SpanId `json:"parent_span_id,omitempty"` + Labels Labels `json:"attributes,omitempty"` +} + +func (client *ReceiverMockClient) GetSpansCount(t *testing.T, metadataFilters MetadataFilters) (uint, error) { + path := parseUrl(t, "spans-list") + + queryParams := url.Values{} + for key, value := range metadataFilters { + queryParams.Set(key, value) + } + + url := client.baseUrl.ResolveReference(path) + url.RawQuery = queryParams.Encode() + + resp, err := http.Get(url.String()) + if err != nil { + return 0, fmt.Errorf("failed fetching %s, err: %w", url, err) + } + + if resp.StatusCode != 200 { + return 0, fmt.Errorf( + "received status code %d in response to receiver request at %q", + resp.StatusCode, url, + ) + } + + var spans []Span + if err := json.NewDecoder(resp.Body).Decode(&spans); err != nil { + return 0, err + } + return uint(len(spans)), nil +} + +func (client *ReceiverMockClient) GetTracesCounts(t *testing.T, metadataFilters MetadataFilters) ([]uint, error) { + path := parseUrl(t, "traces-list") + + queryParams := url.Values{} + for key, value := range metadataFilters { + queryParams.Set(key, value) + } + + url := client.baseUrl.ResolveReference(path) + url.RawQuery = queryParams.Encode() + + resp, err := http.Get(url.String()) + if err != nil { + return []uint{}, fmt.Errorf("failed fetching %s, err: %w", url, err) + } + + if resp.StatusCode != 200 { + return []uint{}, fmt.Errorf( + "received status code %d in response to receiver request at %q", + resp.StatusCode, url, + ) + } + + var traces [][]Span + if err := json.NewDecoder(resp.Body).Decode(&traces); err != nil { + return []uint{}, err + } + + var tracesLengths = make([]uint, len(traces)) + for i := 0; i < len(tracesLengths); i++ { + tracesLengths[i] = uint(len(traces[i])) + } + return tracesLengths, nil +} + // parse metrics list returned by /metrics-list // https://github.com/SumoLogic/sumologic-kubernetes-tools/tree/main/src/rust/receiver-mock#statistics func 
parseMetricList(rawMetricsValues string) (map[string]int, error) { diff --git a/tests/integration/internal/stepfuncs/assess_funcs.go b/tests/integration/internal/stepfuncs/assess_funcs.go index 557c436f3b..045aea6f42 100644 --- a/tests/integration/internal/stepfuncs/assess_funcs.go +++ b/tests/integration/internal/stepfuncs/assess_funcs.go @@ -39,6 +39,108 @@ func WaitUntilPodsAvailable(listOptions metav1.ListOptions, count int, wait time } } +func WaitUntilExpectedSpansPresent( + expectedSpansCount uint, + expectedSpansMetadata map[string]string, + receiverMockNamespace string, + receiverMockServiceName string, + receiverMockServicePort int, + waitDuration time.Duration, + tickDuration time.Duration, +) features.Func { + return func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + kubectlOpts := *ctxopts.KubectlOptions(ctx) + kubectlOpts.Namespace = receiverMockNamespace + terrak8s.WaitUntilServiceAvailable(t, &kubectlOpts, receiverMockServiceName, int(waitDuration), tickDuration) + + client, closeTunnelFunc := receivermock.NewClientWithK8sTunnel(ctx, t) + defer closeTunnelFunc() + + assert.Eventually(t, func() bool { + spansCount, err := client.GetSpansCount(t, expectedSpansMetadata) + if err != nil { + log.ErrorS(err, "failed getting spans counts from receiver-mock") + return false + } + if spansCount < expectedSpansCount { + log.InfoS( + "received spans, less than expected", + "received", spansCount, + "expected", expectedSpansCount, + ) + return false + } + log.InfoS( + "received enough spans", + "received", spansCount, + "expected", expectedSpansCount, + "metadata", expectedSpansMetadata, + ) + return true + }, waitDuration, tickDuration) + return ctx + } +} + +func WaitUntilExpectedTracesPresent( + expectedTracesCount uint, + expectedSpansPerTraceCount uint, + expectedTracesMetadata map[string]string, + receiverMockNamespace string, + receiverMockServiceName string, + receiverMockServicePort int, + waitDuration time.Duration, + tickDuration time.Duration, +) features.Func { + return func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + kubectlOpts := *ctxopts.KubectlOptions(ctx) + kubectlOpts.Namespace = receiverMockNamespace + terrak8s.WaitUntilServiceAvailable(t, &kubectlOpts, receiverMockServiceName, int(waitDuration), tickDuration) + + client, closeTunnelFunc := receivermock.NewClientWithK8sTunnel(ctx, t) + defer closeTunnelFunc() + + assert.Eventually(t, func() bool { + tracesLengths, err := client.GetTracesCounts(t, expectedTracesMetadata) + tracesCount := uint(len(tracesLengths)) + if err != nil { + log.ErrorS(err, "failed getting trace counts from receiver-mock") + return false + } + + if tracesCount < expectedTracesCount { + log.InfoS( + "received traces, less than expected", + "received", tracesCount, + "expected", expectedTracesCount, + ) + return false + } + + for i := 0; i < len(tracesLengths); i++ { + if tracesLengths[i] < expectedSpansPerTraceCount { + log.InfoS( + "received enough traces, but less spans than expected", + "received numbers of spans in traces", tracesLengths, + "expected", expectedSpansPerTraceCount, + ) + return false + } + } + + log.InfoS( + "received enough traces and spans", + "received", tracesCount, + "expected", expectedTracesCount, + "expected spans per trace", expectedSpansPerTraceCount, + "metadata", expectedTracesMetadata, + ) + return true + }, waitDuration, tickDuration) + return ctx + } +} + // WaitUntilExpectedMetricsPresent returns a features.Func that can be used in `Assess` 
calls. // It will wait until all the provided metrics are returned by receiver-mock's HTTP API on // the provided Service and port, until it succeeds or waitDuration passes. diff --git a/tests/integration/internal/stepfuncs/traces.go b/tests/integration/internal/stepfuncs/traces.go new file mode 100644 index 0000000000..ff9911b790 --- /dev/null +++ b/tests/integration/internal/stepfuncs/traces.go @@ -0,0 +1,53 @@ +package stepfuncs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/tracesgenerator" +) + +// Generate logsCount logs using a deployment. +func GenerateTraces( + tracesPerExporter uint, + spansPerTrace uint, + tracesGeneratorName string, + tracesGeneratorNamespace string, + tracesGeneratorImage string, +) features.Func { + return func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context { + client := envConf.Client() + generatorOptions := *tracesgenerator.NewDefaultGeneratorOptions() + generatorOptions.TracesPerExporter = tracesPerExporter + generatorOptions.SpansPerTrace = spansPerTrace + + var namespace corev1.Namespace + err := client.Resources().Get(ctx, tracesGeneratorNamespace, "", &namespace) + if err != nil { + // create the namespace + namespace := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tracesGeneratorNamespace}} + require.NoError(t, client.Resources().Create(ctx, &namespace)) + } + + deployment := tracesgenerator.GetTracesGeneratorDeployment( + ctx, + tracesGeneratorNamespace, + tracesGeneratorName, + tracesGeneratorImage, + generatorOptions, + ) + + // create the deployment + err = client.Resources(tracesGeneratorNamespace).Create(ctx, &deployment) + require.NoError(t, err) + + return ctx + } +} diff --git a/tests/integration/internal/tracesgenerator/tracesgenerator.go b/tests/integration/internal/tracesgenerator/tracesgenerator.go new file mode 100644 index 0000000000..3dc2ed10c8 --- /dev/null +++ b/tests/integration/internal/tracesgenerator/tracesgenerator.go @@ -0,0 +1,127 @@ +package tracesgenerator + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/ctxopts" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + generatorBinaryName = "customer-trace-tester" + deploymentSleepTime = time.Hour * 24 // how much time we spend sleeping after generating logs in a Deployment +) + +type TracesGeneratorOptions struct { + // For all of these options, 0 and "" respectively are treated as "not set" + + // Total number of traces generated per exporter + TracesPerExporter uint + // Number of spans per every trace + SpansPerTrace uint + + // Exporter options + otlpHttpEnabled bool + otlpGrpcEnabled bool + zipkinEnabled bool + jaegerThriftHttp bool +} + +func NewDefaultGeneratorOptions() *TracesGeneratorOptions { + return &TracesGeneratorOptions{ + TracesPerExporter: 40, + SpansPerTrace: 5, + otlpHttpEnabled: true, + otlpGrpcEnabled: true, + zipkinEnabled: true, + jaegerThriftHttp: true, + } +} + +func GetTracesGeneratorDeployment( + ctx context.Context, + namespace string, + name string, + image string, + options TracesGeneratorOptions, +) appsv1.Deployment { + var replicas int32 = 1 + appLabels := map[string]string{ + 
"app": name, + } + metadata := metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: appLabels, + } + + release := ctxopts.HelmRelease(ctx) + otelcolNamespace := ctxopts.Namespace(ctx) + colName := fmt.Sprintf("%s-sumologic-otelcol.%s", release, otelcolNamespace) + + podTemplateSpec := corev1.PodTemplateSpec{ + ObjectMeta: metadata, + Spec: corev1.PodSpec{ + Containers: optionsToContainers(ctx, options, name, image, colName), + }, + } + return appsv1.Deployment{ + ObjectMeta: metadata, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: appLabels, + }, + Template: podTemplateSpec, + }, + } +} + +func optionsToContainers(ctx context.Context, options TracesGeneratorOptions, name string, image string, colName string) []corev1.Container { + // There's no way to tell the log generator to keep running after it's done generating logs. This is annoying if + // we want to run it in a Deployment and not have it be restarted after exiting. So we sleep after it exits. + command := fmt.Sprintf("%s; sleep %f", generatorBinaryName, deploymentSleepTime.Seconds()) + return []corev1.Container{ + { + Name: name, + Image: image, + Command: []string{"/bin/bash", "-c", "--"}, + Args: []string{command}, + Env: []corev1.EnvVar{ + { + Name: "COLLECTOR_HOSTNAME", + Value: colName, + }, + { + Name: "TOTAL_TRACES", + Value: strconv.Itoa(int(options.TracesPerExporter)), + }, + { + Name: "SPANS_PER_TRACE", + Value: strconv.Itoa(int(options.SpansPerTrace)), + }, + { + Name: "OTLP_HTTP", + Value: strconv.FormatBool(options.otlpHttpEnabled), + }, + { + Name: "OTLP_GRPC", + Value: strconv.FormatBool(options.otlpGrpcEnabled), + }, + { + Name: "ZIPKIN", + Value: strconv.FormatBool(options.zipkinEnabled), + }, + { + Name: "JAEGER_THRIFT_HTTP", + Value: strconv.FormatBool(options.jaegerThriftHttp), + }, + }, + }, + } +} diff --git a/tests/integration/values/values_helm_otelcol_traces.yaml b/tests/integration/values/values_helm_otelcol_traces.yaml new file mode 100644 index 0000000000..3928530d3c --- /dev/null +++ b/tests/integration/values/values_helm_otelcol_traces.yaml @@ -0,0 +1,23 @@ +sumologic: + logs: + enabled: false + + metrics: + enabled: false + + traces: + enabled: true + +fluent-bit: + enabled: false + +fluentd: + events: + enabled: false + +otelcol: + config: + # Default otlp pipeline from values.yaml is used. + exporters: + otlphttp: + traces_endpoint: http://receiver-mock.receiver-mock:3000/receiver/v1/traces diff --git a/tests/integration/yamls/receiver-mock.yaml b/tests/integration/yamls/receiver-mock.yaml index f90d4c3dbe..7506e1c2cd 100644 --- a/tests/integration/yamls/receiver-mock.yaml +++ b/tests/integration/yamls/receiver-mock.yaml @@ -34,8 +34,10 @@ spec: - --print-headers - --print-logs - --print-metrics + - --store-traces - --store-logs - --store-metrics + - --print-spans resources: {} securityContext: capabilities: