diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a9e10f0..fca6ad35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,10 +9,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Add kubernetes events logging in Alloy. - Add support for Private CAs in alloy logs. - Add KubeEventsLogger option and related methods in loggedCLuster package. - Add `events-logger` flag in the operator. - Add toggle for `events-logger` in observability-bundle configmap. +- Add tests for `alloy-events` in events-logger-config. ### Changed diff --git a/main.go b/main.go index 345015de..d00fc51b 100644 --- a/main.go +++ b/main.go @@ -167,15 +167,15 @@ func main() { DefaultWorkloadClusterNamespaces: defaultNamespaces, } - eventsLoggerSecret := eventsloggersecret.Reconciler{ - Client: mgr.GetClient(), - } - eventsLoggerConfig := eventsloggerconfig.Reconciler{ Client: mgr.GetClient(), DefaultWorkloadClusterNamespaces: defaultNamespaces, } + eventsLoggerSecret := eventsloggersecret.Reconciler{ + Client: mgr.GetClient(), + } + loggedcluster.O.EnableLoggingFlag = enableLogging loggedcluster.O.LoggingAgent = loggingAgent loggedcluster.O.KubeEventsLogger = eventsLogger diff --git a/pkg/common/common.go b/pkg/common/common.go index 6c35cb31..c2fab44f 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -40,6 +40,10 @@ const ( AlloyLogAgentAppName = "alloy-logs" AlloyLogAgentAppNamespace = "kube-system" + // Alloy app name and namespace when using Alloy as events logger. 
+ AlloyEventsLoggerAppName = "alloy-events" + AlloyEventsLoggerAppNamespace = "kube-system" + MaxBackoffPeriod = "10m" LokiURLFormat = "https://%s/loki/api/v1/push" ) diff --git a/pkg/resource/agents-toggle/observability_bundle_configmap.go b/pkg/resource/agents-toggle/observability_bundle_configmap.go index bf3af29f..49a80184 100644 --- a/pkg/resource/agents-toggle/observability_bundle_configmap.go +++ b/pkg/resource/agents-toggle/observability_bundle_configmap.go @@ -59,30 +59,33 @@ func GenerateObservabilityBundleConfigMap(ctx context.Context, lc loggedcluster. return v1.ConfigMap{}, errors.Errorf("unsupported logging agent %q", lc.GetLoggingAgent()) } - // Enforce grafana-agent as events logger when observability-bundle version < 1.9.0 because this needs alloy 0.7.0. - if observabilityBundleVersion.LT(semver.MustParse("1.9.0")) && lc.GetKubeEventsLogger() == common.EventsLoggerAlloy { - logger := log.FromContext(ctx) - logger.Info("Alloy events logger is not supported by observability bundle, using grafana-agent instead.", "observability-bundle-version", observabilityBundleVersion, "events-logger", lc.GetKubeEventsLogger()) - lc.SetKubeEventsLogger(common.EventsLoggerGrafanaAgent) - } - - switch lc.GetKubeEventsLogger() { - case common.EventsLoggerGrafanaAgent: - appsToEnable["grafanaAgent"] = app{ - Enabled: true, - } - appsToEnable["alloyEvents"] = app{ - Enabled: false, + // If observability-bundle version >= 0.9.0, events loggers can be enabled. + if observabilityBundleVersion.GT(semver.MustParse("0.9.0")) { + // Enforce grafana-agent as events logger when observability-bundle version < 1.9.0 because this needs alloy 0.7.0. 
+ if observabilityBundleVersion.LT(semver.MustParse("1.9.0")) && lc.GetKubeEventsLogger() == common.EventsLoggerAlloy { + logger := log.FromContext(ctx) + logger.Info("Alloy events logger is not supported by observability bundle, using grafana-agent instead.", "observability-bundle-version", observabilityBundleVersion, "events-logger", lc.GetKubeEventsLogger()) + lc.SetKubeEventsLogger(common.EventsLoggerGrafanaAgent) } - case common.EventsLoggerAlloy: - appsToEnable["grafanaAgent"] = app{ - Enabled: false, - } - appsToEnable["alloyEvents"] = app{ - Enabled: true, + + switch lc.GetKubeEventsLogger() { + case common.EventsLoggerGrafanaAgent: + appsToEnable["grafanaAgent"] = app{ + Enabled: true, + } + appsToEnable["alloyEvents"] = app{ + Enabled: false, + } + case common.EventsLoggerAlloy: + appsToEnable["grafanaAgent"] = app{ + Enabled: false, + } + appsToEnable["alloyEvents"] = app{ + Enabled: true, + } + default: + return v1.ConfigMap{}, errors.Errorf("unsupported events logger %q", lc.GetKubeEventsLogger()) } - default: - return v1.ConfigMap{}, errors.Errorf("unsupported events logger %q", lc.GetKubeEventsLogger()) } values := Values{ diff --git a/pkg/resource/agents-toggle/reconciler.go b/pkg/resource/agents-toggle/reconciler.go index 269bcbd9..dbb666a8 100644 --- a/pkg/resource/agents-toggle/reconciler.go +++ b/pkg/resource/agents-toggle/reconciler.go @@ -28,36 +28,36 @@ type Reconciler struct { Scheme *runtime.Scheme } -// ReconcileCreate ensure logging agents are enabled in the given cluster. +// ReconcileCreate ensure logging agents and events loggers are enabled in the given cluster. func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("Logging agents toggle create") + logger.Info("agents toggle create") observabilityBundleVersion, err := common.GetObservabilityBundleAppVersion(lc, r.Client, ctx) if err != nil { // Handle case where the app is not found. 
if apimachineryerrors.IsNotFound(err) { - logger.Info("logging-agents-toggle - observability bundle app not found, requeueing") + logger.Info("agents-toggle - observability bundle app not found, requeueing") // If the app is not found we should requeue and try again later (5 minutes is the app platform default reconciliation time) return ctrl.Result{RequeueAfter: time.Duration(5 * time.Minute)}, nil } return ctrl.Result{}, errors.WithStack(err) } - // Get desired configmap to enable logging agents. + // Get desired configmap to enable logging agents and events loggers. desiredConfigMap, err := GenerateObservabilityBundleConfigMap(ctx, lc, observabilityBundleVersion) if err != nil { return ctrl.Result{}, errors.WithStack(err) } // Check if configmap is already installed. - logger.Info("Logging agents toggle checking", "namespace", desiredConfigMap.GetNamespace(), "name", desiredConfigMap.GetName()) + logger.Info("agents toggle checking", "namespace", desiredConfigMap.GetNamespace(), "name", desiredConfigMap.GetName()) var currentConfigMap v1.ConfigMap err = r.Client.Get(ctx, types.NamespacedName{Name: desiredConfigMap.GetName(), Namespace: desiredConfigMap.GetNamespace()}, ¤tConfigMap) if err != nil { if apimachineryerrors.IsNotFound(err) { // Install configmap. - logger.Info("Logging agents toggle not found, creating") + logger.Info("agents toggle not found, creating") err = r.Client.Create(ctx, &desiredConfigMap) } if err != nil { @@ -66,7 +66,7 @@ func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Inter } if needUpdate(currentConfigMap, desiredConfigMap) { - logger.Info("Logging agents toggle updating") + logger.Info("agents toggle updating") // Update configmap // Configmap is installed and need to be updated. 
err := r.Client.Update(ctx, &desiredConfigMap) @@ -74,16 +74,16 @@ func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Inter return ctrl.Result{}, errors.WithStack(err) } } else { - logger.Info("Logging agents toggle up to date") + logger.Info("agents toggle up to date") } return ctrl.Result{}, nil } -// ReconcileDelete ensure logging agents are disabled for the given cluster. +// ReconcileDelete ensure logging agents and events loggers are disabled for the given cluster. func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("Logging agents toggle delete") + logger.Info("agents toggle delete") // Get observability bundle app metadata. appMeta := common.ObservabilityBundleAppMeta(lc) @@ -93,7 +93,7 @@ func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Inter if err != nil { // Handle case where the app is not found. if apimachineryerrors.IsNotFound(err) { - logger.Info("logging-agents-toggle - observability bundle app not found, skipping deletion") + logger.Info("agents-toggle - observability bundle app not found, skipping deletion") // If the app is not found we ignore the error and return, as this means the app was already deleted. return ctrl.Result{}, nil } @@ -105,6 +105,11 @@ func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Inter return ctrl.Result{}, errors.WithStack(err) } + // If the observability-bundle version is too old, we don't need to do anything. + if observabilityBundleVersion.LT(semver.MustParse("0.9.0")) { + return ctrl.Result{}, nil + } + // Get expected configmap. desiredConfigMap, err := GenerateObservabilityBundleConfigMap(ctx, lc, observabilityBundleVersion) if err != nil { @@ -112,18 +117,18 @@ func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Inter } // Delete configmap. 
- logger.Info("Logging agents toggle deleting", "namespace", desiredConfigMap.GetNamespace(), "name", desiredConfigMap.GetName()) + logger.Info("agents toggle deleting", "namespace", desiredConfigMap.GetNamespace(), "name", desiredConfigMap.GetName()) err = r.Client.Delete(ctx, &desiredConfigMap) if err != nil { if apimachineryerrors.IsNotFound(err) { // Do no throw error in case it was not found, as this means // it was already deleted. - logger.Info("Logging agents toggle already deleted") + logger.Info("agents toggle already deleted") } else if err != nil { return ctrl.Result{}, errors.WithStack(err) } } else { - logger.Info("Logging agents toggle deleted") + logger.Info("agents toggle deleted") } return ctrl.Result{}, nil diff --git a/pkg/resource/events-logger-config/alloy-events-config.go b/pkg/resource/events-logger-config/alloy-events-config.go new file mode 100644 index 00000000..f816f78b --- /dev/null +++ b/pkg/resource/events-logger-config/alloy-events-config.go @@ -0,0 +1,86 @@ +package eventsloggerconfig + +import ( + "bytes" + _ "embed" + "fmt" + "text/template" + + "github.com/Masterminds/sprig/v3" + + "github.com/giantswarm/logging-operator/pkg/common" + loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" + loggingsecret "github.com/giantswarm/logging-operator/pkg/resource/logging-secret" +) + +var ( + //go:embed alloy/events-logger.alloy.template + alloyEvents string + alloyEventsTemplate *template.Template + + //go:embed alloy/events-logger-config.alloy.yaml.template + alloyEventsConfig string + alloyEventsConfigTemplate *template.Template +) + +func init() { + alloyEventsTemplate = template.Must(template.New("events-logger.alloy").Funcs(sprig.FuncMap()).Parse(alloyEvents)) + alloyEventsConfigTemplate = template.Must(template.New("events-logger-config.alloy.yaml").Funcs(sprig.FuncMap()).Parse(alloyEventsConfig)) +} + +func generateAlloyEventsConfig(lc loggedcluster.Interface, defaultNamespaces []string) (string, error) { + 
var values bytes.Buffer + + alloyConfig, err := generateAlloyConfig(lc, defaultNamespaces) + if err != nil { + return "", err + } + + data := struct { + AlloyConfig string + SecretName string + }{ + AlloyConfig: alloyConfig, + SecretName: common.AlloyEventsLoggerAppName, + } + + err = alloyEventsConfigTemplate.Execute(&values, data) + if err != nil { + return "", err + } + + return values.String(), nil +} + +func generateAlloyConfig(lc loggedcluster.Interface, defaultNamespaces []string) (string, error) { + var values bytes.Buffer + + data := struct { + ClusterID string + Installation string + InsecureSkipVerify string + MaxBackoffPeriod string + LokiURLEnvVarName string + TenantIDEnvVarName string + BasicAuthUsernameEnvVarName string + BasicAuthPasswordEnvVarName string + ScrapedNamespaces string + }{ + ClusterID: lc.GetClusterName(), + Installation: lc.GetInstallationName(), + InsecureSkipVerify: fmt.Sprintf("%t", lc.IsInsecureCA()), + MaxBackoffPeriod: common.MaxBackoffPeriod, + LokiURLEnvVarName: loggingsecret.AlloyLokiURLEnvVarName, + TenantIDEnvVarName: loggingsecret.AlloyTenantIDEnvVarName, + BasicAuthUsernameEnvVarName: loggingsecret.AlloyBasicAuthUsernameEnvVarName, + BasicAuthPasswordEnvVarName: loggingsecret.AlloyBasicAuthPasswordEnvVarName, + ScrapedNamespaces: common.FormatScrapedNamespaces(lc, defaultNamespaces), + } + + err := alloyEventsTemplate.Execute(&values, data) + if err != nil { + return "", err + } + + return values.String(), nil +} diff --git a/pkg/resource/events-logger-config/alloy-events-config_test.go b/pkg/resource/events-logger-config/alloy-events-config_test.go new file mode 100644 index 00000000..9766fc99 --- /dev/null +++ b/pkg/resource/events-logger-config/alloy-events-config_test.go @@ -0,0 +1,73 @@ +package eventsloggerconfig + +import ( + _ "embed" + "os" + "path/filepath" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + + "github.com/google/go-cmp/cmp" + + 
loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" + "github.com/giantswarm/logging-operator/pkg/logged-cluster/capicluster" +) + +func TestGenerateAlloyEventsConfig(t *testing.T) { + testCases := []struct { + goldenFile string + defaultNamespaces []string + installationName string + clusterName string + }{ + { + goldenFile: "alloy/test/events-logger-config.alloy.MC.yaml", + installationName: "test-installation", + clusterName: "test-installation", + }, + { + goldenFile: "alloy/test/events-logger-config.alloy.WC.yaml", + installationName: "test-installation", + clusterName: "test-cluster", + }, + } + + for _, tc := range testCases { + t.Run(filepath.Base(tc.goldenFile), func(t *testing.T) { + golden, err := os.ReadFile(tc.goldenFile) + if err != nil { + t.Fatalf("Failed to read golden file: %v", err) + } + + loggedCluster := &capicluster.Object{ + Object: &capi.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: tc.clusterName, + }, + }, + Options: loggedcluster.Options{ + InstallationName: tc.installationName, + KubeEventsLogger: "alloy", + }, + } + + config, err := generateAlloyEventsConfig(loggedCluster, []string{"kube-system", "giantswarm"}) + if err != nil { + t.Fatalf("Failed to generate alloy config: %v", err) + } + + if string(golden) != config { + t.Logf("Generated config differs from %s, diff:\n%s", tc.goldenFile, cmp.Diff(string(golden), config)) + t.Fail() + if *update { + //nolint:gosec + if err := os.WriteFile(tc.goldenFile, []byte(config), 0644); err != nil { + t.Fatalf("Failed to update golden file: %v", err) + } + } + } + }) + } +} diff --git a/pkg/resource/events-logger-config/alloy/events-logger-config.alloy.yaml.template b/pkg/resource/events-logger-config/alloy/events-logger-config.alloy.yaml.template new file mode 100644 index 00000000..fa698ed7 --- /dev/null +++ b/pkg/resource/events-logger-config/alloy/events-logger-config.alloy.yaml.template @@ -0,0 +1,30 @@ +# This file was generated by logging-operator. 
+# It configures Alloy to be used as events logger. +# - configMap is generated from events-logger.alloy.template and passed as a string +# here and will be created by Alloy's chart. +# - Alloy runs as a deployment, with only 1 replica. +alloy: + alloy: + configMap: + create: true + content: |- + {{- .AlloyConfig | nindent 8 }} + envFrom: + - secretRef: + name: {{ .SecretName }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsUser: 10 + runAsGroup: 10 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + controller: + type: deployment + replicas: 1 + crds: + create: false diff --git a/pkg/resource/events-logger-config/alloy/events-logger.alloy.template b/pkg/resource/events-logger-config/alloy/events-logger.alloy.template new file mode 100644 index 00000000..e99d872e --- /dev/null +++ b/pkg/resource/events-logger-config/alloy/events-logger.alloy.template @@ -0,0 +1,32 @@ +loki.source.kubernetes_events "local" { + namespaces = {{ .ScrapedNamespaces }} + forward_to = [loki.write.default.receiver] +} + +// Loki target configuration +loki.write "default" { + endpoint { + url = env("{{ .LokiURLEnvVarName }}") + max_backoff_period = "{{ .MaxBackoffPeriod }}" + tenant_id = env("{{ .TenantIDEnvVarName }}") + + basic_auth { + username = env("{{ .BasicAuthUsernameEnvVarName }}") + password = env("{{ .BasicAuthPasswordEnvVarName }}") + } + + tls_config { + insecure_skip_verify = {{ .InsecureSkipVerify }} + } + } + external_labels = { + cluster_id = "{{ .ClusterID }}", + installation = "{{ .Installation }}", + scrape_job = "kubernetes-events", + } +} + +logging { + level = "info" + format = "logfmt" +} diff --git a/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.MC.yaml b/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.MC.yaml new file mode 100644 index 00000000..6b062d2d --- /dev/null +++ 
b/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.MC.yaml @@ -0,0 +1,62 @@ +# This file was generated by logging-operator. +# It configures Alloy to be used as events logger. +# - configMap is generated from events-logger.alloy.template and passed as a string +# here and will be created by Alloy's chart. +# - Alloy runs as a deployment, with only 1 replica. +alloy: + alloy: + configMap: + create: true + content: |- + loki.source.kubernetes_events "local" { + namespaces = [] + forward_to = [loki.write.default.receiver] + } + + // Loki target configuration + loki.write "default" { + endpoint { + url = env("LOKI_URL") + max_backoff_period = "10m" + tenant_id = env("TENANT_ID") + + basic_auth { + username = env("BASIC_AUTH_USERNAME") + password = env("BASIC_AUTH_PASSWORD") + } + + tls_config { + insecure_skip_verify = false + } + } + external_labels = { + cluster_id = "test-installation", + installation = "test-installation", + scrape_job = "kubernetes-events", + } + } + + logging { + level = "info" + format = "logfmt" + } + + envFrom: + - secretRef: + name: alloy-events + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsUser: 10 + runAsGroup: 10 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + controller: + type: deployment + replicas: 1 + crds: + create: false diff --git a/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.WC.yaml b/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.WC.yaml new file mode 100644 index 00000000..a0302c5b --- /dev/null +++ b/pkg/resource/events-logger-config/alloy/test/events-logger-config.alloy.WC.yaml @@ -0,0 +1,62 @@ +# This file was generated by logging-operator. +# It configures Alloy to be used as events logger. +# - configMap is generated from events-logger.alloy.template and passed as a string +# here and will be created by Alloy's chart. 
+# - Alloy runs as a deployment, with only 1 replica. +alloy: + alloy: + configMap: + create: true + content: |- + loki.source.kubernetes_events "local" { + namespaces = ["kube-system", "giantswarm"] + forward_to = [loki.write.default.receiver] + } + + // Loki target configuration + loki.write "default" { + endpoint { + url = env("LOKI_URL") + max_backoff_period = "10m" + tenant_id = env("TENANT_ID") + + basic_auth { + username = env("BASIC_AUTH_USERNAME") + password = env("BASIC_AUTH_PASSWORD") + } + + tls_config { + insecure_skip_verify = false + } + } + external_labels = { + cluster_id = "test-cluster", + installation = "test-installation", + scrape_job = "kubernetes-events", + } + } + + logging { + level = "info" + format = "logfmt" + } + + envFrom: + - secretRef: + name: alloy-events + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsUser: 10 + runAsGroup: 10 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + controller: + type: deployment + replicas: 1 + crds: + create: false diff --git a/pkg/resource/events-logger-config/events-logger-config.go b/pkg/resource/events-logger-config/events-logger-config.go new file mode 100644 index 00000000..fd5d3fa5 --- /dev/null +++ b/pkg/resource/events-logger-config/events-logger-config.go @@ -0,0 +1,68 @@ +package eventsloggerconfig + +import ( + "fmt" + + "github.com/pkg/errors" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/giantswarm/logging-operator/pkg/common" + loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" +) + +const ( + eventsLogggerConfigName = "events-logger-config" + grafanaAgentConfigName = "grafana-agent-config" +) + +func generateEventsLoggerConfig(lc loggedcluster.Interface, defaultNamespaces []string) (v1.ConfigMap, error) { + var values string + var err error + + switch lc.GetKubeEventsLogger() { + case common.EventsLoggerGrafanaAgent: + values, err 
= generateGrafanaAgentConfig(lc, defaultNamespaces) + if err != nil { + return v1.ConfigMap{}, err + } + case common.EventsLoggerAlloy: + values, err = generateAlloyEventsConfig(lc, defaultNamespaces) + if err != nil { + return v1.ConfigMap{}, err + } + default: + return v1.ConfigMap{}, errors.Errorf("unsupported events logger %q", lc.GetKubeEventsLogger()) + } + + configmap := v1.ConfigMap{ + ObjectMeta: configMeta(lc), + Data: map[string]string{ + "values": values, + }, + } + + return configmap, nil +} + +// ConfigMeta returns metadata for the logging-config +func configMeta(lc loggedcluster.Interface) metav1.ObjectMeta { + metadata := metav1.ObjectMeta{ + Name: getEventsLoggerConfigName(lc), + Namespace: lc.GetAppsNamespace(), + Labels: map[string]string{}, + } + + common.AddCommonLabels(metadata.Labels) + return metadata +} + +func getEventsLoggerConfigName(lc loggedcluster.Interface) string { + switch lc.GetKubeEventsLogger() { + case common.EventsLoggerGrafanaAgent: + return fmt.Sprintf("%s-%s", lc.GetClusterName(), grafanaAgentConfigName) + default: + return fmt.Sprintf("%s-%s", lc.GetClusterName(), eventsLogggerConfigName) + } +} diff --git a/pkg/resource/events-logger-config/grafana-agent-config.go b/pkg/resource/events-logger-config/grafana-agent-config.go index 5c4c5472..6ccf8381 100644 --- a/pkg/resource/events-logger-config/grafana-agent-config.go +++ b/pkg/resource/events-logger-config/grafana-agent-config.go @@ -7,14 +7,10 @@ import ( "text/template" "github.com/Masterminds/sprig/v3" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/giantswarm/logging-operator/pkg/common" loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" -) - -const ( - grafanaAgentConfigName = "grafana-agent-config" + eventsloggersecret "github.com/giantswarm/logging-operator/pkg/resource/events-logger-secret" ) var ( @@ -32,18 +28,6 @@ func init() { grafanaAgentConfigTemplate = 
template.Must(template.New("events-logger-config.grafanaagent.yaml").Funcs(sprig.FuncMap()).Parse(grafanaAgentConfig)) } -// configMeta returns metadata for the grafana-agent-config -func configMeta(lc loggedcluster.Interface) metav1.ObjectMeta { - metadata := metav1.ObjectMeta{ - Name: getGrafanaAgentConfigName(lc), - Namespace: lc.GetAppsNamespace(), - Labels: map[string]string{}, - } - - common.AddCommonLabels(metadata.Labels) - return metadata -} - // generateGrafanaAgentConfig returns a configmap for // the grafana-agent extra-config func generateGrafanaAgentConfig(lc loggedcluster.Interface, defaultWorkloadClusterNamespaces []string) (string, error) { @@ -60,7 +44,7 @@ func generateGrafanaAgentConfig(lc loggedcluster.Interface, defaultWorkloadClust GrafanaAgentInnerConfig: grafanaAgentInnerConfig, } - err = grafanaAgentTemplate.Execute(&values, data) + err = grafanaAgentConfigTemplate.Execute(&values, data) if err != nil { return "", err } @@ -81,18 +65,14 @@ func generateGrafanaAgentInnerConfig(lc loggedcluster.Interface, defaultWorkload ClusterID: lc.GetClusterName(), Installation: lc.GetInstallationName(), InsecureSkipVerify: fmt.Sprintf("%t", lc.IsInsecureCA()), - SecretName: fmt.Sprintf("%s-%s", lc.GetClusterName(), common.GrafanaAgentExtraSecretName()), + SecretName: eventsloggersecret.GetEventsLoggerSecretName(lc), ScrapedNamespaces: common.FormatScrapedNamespaces(lc, defaultWorkloadClusterNamespaces), } - err := grafanaAgentConfigTemplate.Execute(&values, data) + err := grafanaAgentTemplate.Execute(&values, data) if err != nil { return "", err } return values.String(), nil } - -func getGrafanaAgentConfigName(lc loggedcluster.Interface) string { - return fmt.Sprintf("%s-%s", lc.GetClusterName(), grafanaAgentConfigName) -} diff --git a/pkg/resource/events-logger-config/grafana-agent-config_test.go b/pkg/resource/events-logger-config/grafana-agent-config_test.go index 51abd9b6..f84123a1 100644 --- 
a/pkg/resource/events-logger-config/grafana-agent-config_test.go +++ b/pkg/resource/events-logger-config/grafana-agent-config_test.go @@ -54,6 +54,7 @@ func TestGenerateGrafanaAgentConfig(t *testing.T) { }, Options: loggedcluster.Options{ InstallationName: tc.installationName, + KubeEventsLogger: "grafana-agent", }, } diff --git a/pkg/resource/events-logger-config/grafana-agent/events-logger-config.grafanaagent.yaml.template b/pkg/resource/events-logger-config/grafana-agent/events-logger-config.grafanaagent.yaml.template index 5d4c5439..fa2a0b1d 100644 --- a/pkg/resource/events-logger-config/grafana-agent/events-logger-config.grafanaagent.yaml.template +++ b/pkg/resource/events-logger-config/grafana-agent/events-logger-config.grafanaagent.yaml.template @@ -1,35 +1,17 @@ -logging { - level = "info" - format = "logfmt" -} - -loki.source.kubernetes_events "local" { - namespaces = {{ .ScrapedNamespaces }} - forward_to = [loki.write.default.receiver] -} - -remote.kubernetes.secret "credentials" { - namespace = "kube-system" - name = "{{ .SecretName }}" -} - -loki.write "default" { - endpoint { - url = nonsensitive(remote.kubernetes.secret.credentials.data["logging-url"]) - tenant_id = nonsensitive(remote.kubernetes.secret.credentials.data["logging-tenant-id"]) - - basic_auth { - username = nonsensitive(remote.kubernetes.secret.credentials.data["logging-username"]) - password = remote.kubernetes.secret.credentials.data["logging-password"] - } - - tls_config { - insecure_skip_verify = {{ .InsecureSkipVerify }} - } - } - external_labels = { - installation = "{{ .Installation }}", - cluster_id = "{{ .ClusterID }}", - scrape_job = "kubernetes-events", - } -} +# This file was generated by logging-operator. +# It configures the Grafana-agent to be used as events logger. +# - configMap is generated from events-logger.grafanaagent.template and passed as a string +# here and will be created by Grafana-agent's chart. +# - Grafana-agent runs as a deployment, with only 1 replica. 
+grafana-agent: + agent: + configMap: + content: |- + {{- .GrafanaAgentInnerConfig | nindent 8 }} + extraArgs: + - --disable-reporting + controller: + replicas: 1 + type: deployment + crds: + create: false diff --git a/pkg/resource/events-logger-config/grafana-agent/events-logger.grafanaagent.template b/pkg/resource/events-logger-config/grafana-agent/events-logger.grafanaagent.template index fa2a0b1d..5d4c5439 100644 --- a/pkg/resource/events-logger-config/grafana-agent/events-logger.grafanaagent.template +++ b/pkg/resource/events-logger-config/grafana-agent/events-logger.grafanaagent.template @@ -1,17 +1,35 @@ -# This file was generated by logging-operator. -# It configures the Grafana-agent to be used as events logger. -# - configMap is generated from events-logger.grafanaagent.template and passed as a string -# here and will be created by Grafana-agent's chart. -# - Grafana-agent runs as a deployment, with only 1 replica. -grafana-agent: - agent: - configMap: - content: |- - {{- .GrafanaAgentInnerConfig | nindent 8 }} - extraArgs: - - --disable-reporting - controller: - replicas: 1 - type: deployment - crds: - create: false +logging { + level = "info" + format = "logfmt" +} + +loki.source.kubernetes_events "local" { + namespaces = {{ .ScrapedNamespaces }} + forward_to = [loki.write.default.receiver] +} + +remote.kubernetes.secret "credentials" { + namespace = "kube-system" + name = "{{ .SecretName }}" +} + +loki.write "default" { + endpoint { + url = nonsensitive(remote.kubernetes.secret.credentials.data["logging-url"]) + tenant_id = nonsensitive(remote.kubernetes.secret.credentials.data["logging-tenant-id"]) + + basic_auth { + username = nonsensitive(remote.kubernetes.secret.credentials.data["logging-username"]) + password = remote.kubernetes.secret.credentials.data["logging-password"] + } + + tls_config { + insecure_skip_verify = {{ .InsecureSkipVerify }} + } + } + external_labels = { + installation = "{{ .Installation }}", + cluster_id = "{{ .ClusterID }}", + 
scrape_job = "kubernetes-events", + } +} diff --git a/pkg/resource/events-logger-config/reconciler.go b/pkg/resource/events-logger-config/reconciler.go index f95174fd..4db3cb19 100644 --- a/pkg/resource/events-logger-config/reconciler.go +++ b/pkg/resource/events-logger-config/reconciler.go @@ -3,15 +3,12 @@ package eventsloggerconfig import ( "context" "reflect" - "time" - appv1 "github.com/giantswarm/apiextensions-application/api/v1alpha1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "github.com/giantswarm/logging-operator/pkg/common" loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" ctrl "sigs.k8s.io/controller-runtime" @@ -20,51 +17,31 @@ import ( ) // Reconciler implements a reconciler.Interface to handle -// GrafanaAgent config: extra grafana-agent config defining what we want to retrieve. +// EventsLogger config: extra events-logger config defining what we want to retrieve. type Reconciler struct { client.Client DefaultWorkloadClusterNamespaces []string } -// ReconcileCreate ensures grafana-agent config is created with the right credentials +// ReconcileCreate ensures events-logger config is created with the right credentials func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("grafana-agent-config create") - - // Get observability bundle app metadata. - appMeta := common.ObservabilityBundleAppMeta(lc) - // Retrieve the app. 
- var currentApp appv1.App - if err := r.Client.Get(ctx, types.NamespacedName{Name: lc.AppConfigName("grafana-agent"), Namespace: appMeta.GetNamespace()}, ¤tApp); err != nil { - if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-config - app not found, requeuing") - // If the app is not found we should requeue and try again later (5 minutes is the app platform default reconciliation time) - return ctrl.Result{RequeueAfter: time.Duration(5 * time.Minute)}, nil - } - return ctrl.Result{}, errors.WithStack(err) - } + logger.Info("events-logger-config create") // Get desired config - values, err := generateGrafanaAgentConfig(lc, r.DefaultWorkloadClusterNamespaces) + desiredEventsLoggerConfig, err := generateEventsLoggerConfig(lc, r.DefaultWorkloadClusterNamespaces) if err != nil { - logger.Info("grafana-agent-config - failed generating grafana-agent config!", "error", err) + logger.Info("events-logger-config - failed generating events-logger config!", "error", err) return ctrl.Result{}, errors.WithStack(err) } - desiredEventsLoggerConfig := v1.ConfigMap{ - ObjectMeta: configMeta(lc), - Data: map[string]string{ - "values": values, - }, - } - // Check if config already exists. 
- logger.Info("grafana-agent-config - getting", "namespace", desiredEventsLoggerConfig.GetNamespace(), "name", desiredEventsLoggerConfig.GetName()) + logger.Info("events-logger-config - getting", "namespace", desiredEventsLoggerConfig.GetNamespace(), "name", desiredEventsLoggerConfig.GetName()) var currentEventsLoggerConfig v1.ConfigMap err = r.Client.Get(ctx, types.NamespacedName{Name: desiredEventsLoggerConfig.GetName(), Namespace: desiredEventsLoggerConfig.GetNamespace()}, ¤tEventsLoggerConfig) if err != nil { if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-config not found, creating") + logger.Info("events-logger-config not found, creating") err = r.Client.Create(ctx, &desiredEventsLoggerConfig) if err != nil { return ctrl.Result{}, errors.WithStack(err) @@ -75,49 +52,49 @@ func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Inter } if !needUpdate(currentEventsLoggerConfig, desiredEventsLoggerConfig) { - logger.Info("grafana-agent-config up to date") + logger.Info("events-logger-config up to date") return ctrl.Result{}, nil } - logger.Info("grafana-agent-config - updating") + logger.Info("events-logger-config - updating") err = r.Client.Update(ctx, &desiredEventsLoggerConfig) if err != nil { return ctrl.Result{}, errors.WithStack(err) } - logger.Info("grafana-agent-config - done") + logger.Info("events-logger-config - done") return ctrl.Result{}, nil + } -// ReconcileDelete ensure grafana-agent-config is deleted for the given cluster. func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("grafana-agent-config delete") + logger.Info("events-logger-config delete") // Get expected configmap. 
var currentEventsLoggerConfig v1.ConfigMap - err := r.Client.Get(ctx, types.NamespacedName{Name: getGrafanaAgentConfigName(lc), Namespace: lc.GetAppsNamespace()}, &currentEventsLoggerConfig) + err := r.Client.Get(ctx, types.NamespacedName{Name: getEventsLoggerConfigName(lc), Namespace: lc.GetAppsNamespace()}, &currentEventsLoggerConfig) if err != nil { if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-config not found, stop here") + logger.Info("events-logger-config not found, stop here") return ctrl.Result{}, nil } return ctrl.Result{}, errors.WithStack(err) } // Delete configmap. - logger.Info("grafana-agent-config deleting", "namespace", currentEventsLoggerConfig.GetNamespace(), "name", currentEventsLoggerConfig.GetName()) + logger.Info("events-logger-config deleting", "namespace", currentEventsLoggerConfig.GetNamespace(), "name", currentEventsLoggerConfig.GetName()) err = r.Client.Delete(ctx, &currentEventsLoggerConfig) if err != nil { if apimachineryerrors.IsNotFound(err) { // Do no throw error in case it was not found, as this means // it was already deleted. 
- logger.Info("grafana-agent-config already deleted") + logger.Info("events-logger-config already deleted") return ctrl.Result{}, nil } return ctrl.Result{}, errors.WithStack(err) } - logger.Info("grafana-agent-config deleted") + logger.Info("events-logger-config deleted") return ctrl.Result{}, nil } diff --git a/pkg/resource/events-logger-secret/events-logger-secret.go b/pkg/resource/events-logger-secret/events-logger-secret.go new file mode 100644 index 00000000..a7eb79ad --- /dev/null +++ b/pkg/resource/events-logger-secret/events-logger-secret.go @@ -0,0 +1,68 @@ +package eventsloggersecret + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pkg/errors" + + "github.com/giantswarm/logging-operator/pkg/common" + loggedcluster "github.com/giantswarm/logging-operator/pkg/logged-cluster" + loggingsecret "github.com/giantswarm/logging-operator/pkg/resource/logging-secret" +) + +const ( + eventsLoggerSecretName = "events-logger-secret" // #nosec G101 + grafanaAgentSecretName = "grafana-agent-secret" // #nosec G101 +) + +func generateEventsLoggerSecret(lc loggedcluster.Interface, loggingCredentialsSecret *v1.Secret, lokiURL string) (v1.Secret, error) { + var data map[string][]byte + var err error + + switch lc.GetKubeEventsLogger() { + case common.EventsLoggerGrafanaAgent: + data, err = generateGrafanaAgentSecret(lc, loggingCredentialsSecret, lokiURL) + if err != nil { + return v1.Secret{}, err + } + case common.EventsLoggerAlloy: + // In the case of Alloy being the events logger, we reuse the secret generation from the logging-secret package + data, err = loggingsecret.GenerateAlloyLoggingSecret(lc, loggingCredentialsSecret, lokiURL) + if err != nil { + return v1.Secret{}, err + } + default: + return v1.Secret{}, errors.Errorf("unsupported logging agent %q", lc.GetLoggingAgent()) + } + + secret := v1.Secret{ + ObjectMeta: secretMeta(lc), + Data: data, + } + + return secret, nil +} + +// SecretMeta returns 
metadata for the events-logger-secret +func secretMeta(lc loggedcluster.Interface) metav1.ObjectMeta { + metadata := metav1.ObjectMeta{ + Name: GetEventsLoggerSecretName(lc), + Namespace: lc.GetAppsNamespace(), + Labels: map[string]string{}, + } + + common.AddCommonLabels(metadata.Labels) + return metadata +} + +func GetEventsLoggerSecretName(lc loggedcluster.Interface) string { + switch lc.GetKubeEventsLogger() { + case common.EventsLoggerGrafanaAgent: + return fmt.Sprintf("%s-%s", lc.GetClusterName(), grafanaAgentSecretName) + default: + return fmt.Sprintf("%s-%s", lc.GetClusterName(), eventsLoggerSecretName) + } +} diff --git a/pkg/resource/events-logger-secret/grafana-agent-secret.go b/pkg/resource/events-logger-secret/grafana-agent-secret.go index cc28a4f3..744b4869 100644 --- a/pkg/resource/events-logger-secret/grafana-agent-secret.go +++ b/pkg/resource/events-logger-secret/grafana-agent-secret.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" "github.com/giantswarm/logging-operator/pkg/common" @@ -22,26 +21,14 @@ type extraSecret struct { Data map[string]string `yaml:"data" json:"data"` } -// SecretMeta returns metadata for the grafana-agent-secret -func SecretMeta(lc loggedcluster.Interface) metav1.ObjectMeta { - metadata := metav1.ObjectMeta{ - Name: getGrafanaAgentSecretName(lc), - Namespace: lc.GetAppsNamespace(), - Labels: map[string]string{}, - } - - common.AddCommonLabels(metadata.Labels) - return metadata -} - // GenerateGrafanaAgentSecret returns a secret for // the Loki-multi-tenant-proxy config -func GenerateGrafanaAgentSecret(lc loggedcluster.Interface, credentialsSecret *v1.Secret, lokiURL string) (v1.Secret, error) { +func generateGrafanaAgentSecret(lc loggedcluster.Interface, credentialsSecret *v1.Secret, lokiURL string) (map[string][]byte, error) { clusterName := lc.GetClusterName() writeUser := clusterName writePassword, err := 
loggingcredentials.GetPassword(lc, credentialsSecret, clusterName) if err != nil { - return v1.Secret{}, errors.WithStack(err) + return nil, errors.WithStack(err) } values := values{ @@ -58,19 +45,11 @@ func GenerateGrafanaAgentSecret(lc loggedcluster.Interface, credentialsSecret *v v, err := yaml.Marshal(values) if err != nil { - return v1.Secret{}, errors.WithStack(err) + return nil, errors.WithStack(err) } - secret := v1.Secret{ - ObjectMeta: SecretMeta(lc), - Data: map[string][]byte{ - "values": []byte(v), - }, - } - - return secret, nil -} + data := make(map[string][]byte) + data["values"] = []byte(v) -func getGrafanaAgentSecretName(lc loggedcluster.Interface) string { - return fmt.Sprintf("%s-%s", lc.GetClusterName(), common.GrafanaAgentExtraSecretName()) + return data, nil } diff --git a/pkg/resource/events-logger-secret/reconciler.go b/pkg/resource/events-logger-secret/reconciler.go index 75558556..80355064 100644 --- a/pkg/resource/events-logger-secret/reconciler.go +++ b/pkg/resource/events-logger-secret/reconciler.go @@ -3,10 +3,7 @@ package eventsloggersecret import ( "context" "reflect" - "time" - "github.com/blang/semver" - appv1 "github.com/giantswarm/apiextensions-application/api/v1alpha1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,49 +19,20 @@ import ( ) // Reconciler implements a reconciler.Interface to handle -// grafana-agent secret: extra secret which stores logging write credentials +// Events-logger secret: extra events-logger secret about where and how to send logs (in this case : k8S events) type Reconciler struct { client.Client } -// ReconcileCreate ensures grafana-agent secret is created with the right credentials +// ReconcileCreate ensures events-logger-secret is created with the right credentials func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - 
logger.Info("grafana-agent-secret create") - - observabilityBundleVersion, err := common.GetObservabilityBundleAppVersion(lc, r.Client, ctx) - if err != nil { - // Handle case where the app is not found. - if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-secret - observability bundle app not found, requeueing") - // If the app is not found we should requeue and try again later (5 minutes is the app platform default reconciliation time) - return ctrl.Result{RequeueAfter: time.Duration(5 * time.Minute)}, nil - } - return ctrl.Result{}, errors.WithStack(err) - } - - // The grafana agent was added only for bundle version 0.9.0 and above (cf. https://github.com/giantswarm/observability-bundle/compare/v0.8.9...v0.9.0) - if observabilityBundleVersion.LT(semver.MustParse("0.9.0")) { - return ctrl.Result{}, nil - } - - // Check existence of grafana-agent app - var currentApp appv1.App - appMeta := common.ObservabilityBundleAppMeta(lc) - err = r.Client.Get(ctx, types.NamespacedName{Name: lc.AppConfigName("grafana-agent"), Namespace: appMeta.GetNamespace()}, &currentApp) - if err != nil { - if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-secret - app not found, requeuing") - // If the app is not found we should requeue and try again later (5 minutes is the app platform default reconciliation time) - return ctrl.Result{RequeueAfter: time.Duration(5 * time.Minute)}, nil - } - return ctrl.Result{}, errors.WithStack(err) - } + logger.Info("events-logger-secret create") // Retrieve secret containing credentials - var loggingCredentialsSecret v1.Secret - err = r.Client.Get(ctx, types.NamespacedName{Name: loggingcredentials.LoggingCredentialsSecretMeta(lc).Name, Namespace: loggingcredentials.LoggingCredentialsSecretMeta(lc).Namespace}, - &loggingCredentialsSecret) + var eventsLoggerCredentialsSecret v1.Secret + err := r.Client.Get(ctx, types.NamespacedName{Name: loggingcredentials.LoggingCredentialsSecretMeta(lc).Name, Namespace: 
loggingcredentials.LoggingCredentialsSecretMeta(lc).Namespace}, + &eventsLoggerCredentialsSecret) if err != nil { return ctrl.Result{}, errors.WithStack(err) } @@ -76,20 +44,20 @@ func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Inter } // Get desired secret - desiredGrafanaAgentSecret, err := GenerateGrafanaAgentSecret(lc, &loggingCredentialsSecret, lokiURL) + desiredEventsLoggerSecret, err := generateEventsLoggerSecret(lc, &eventsLoggerCredentialsSecret, lokiURL) if err != nil { - logger.Info("grafana-agent-secret - failed generating auth config!", "error", err) + logger.Error(err, "failed generating events logger secret") return ctrl.Result{}, errors.WithStack(err) } // Check if secret already exists. - logger.Info("grafana-agent-secret - getting", "namespace", desiredGrafanaAgentSecret.GetNamespace(), "name", desiredGrafanaAgentSecret.GetName()) - var currentGrafanaAgentSecret v1.Secret - err = r.Client.Get(ctx, types.NamespacedName{Name: desiredGrafanaAgentSecret.GetName(), Namespace: desiredGrafanaAgentSecret.GetNamespace()}, &currentGrafanaAgentSecret) + logger.Info("events-logger-secret - getting", "namespace", desiredEventsLoggerSecret.GetNamespace(), "name", desiredEventsLoggerSecret.GetName()) + var currentEventsLoggerSecret v1.Secret + err = r.Client.Get(ctx, types.NamespacedName{Name: desiredEventsLoggerSecret.GetName(), Namespace: desiredEventsLoggerSecret.GetNamespace()}, &currentEventsLoggerSecret) if err != nil { if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-secret not found, creating") - err = r.Client.Create(ctx, &desiredGrafanaAgentSecret) + logger.Info("events-logger-secret not found, creating") + err = r.Client.Create(ctx, &desiredEventsLoggerSecret) if err != nil { return ctrl.Result{}, errors.WithStack(err) } @@ -98,50 +66,50 @@ func (r *Reconciler) ReconcileCreate(ctx context.Context, lc loggedcluster.Inter } } - if !needUpdate(currentGrafanaAgentSecret, desiredGrafanaAgentSecret) { - 
logger.Info("grafana-agent-secret up to date") + if !needUpdate(currentEventsLoggerSecret, desiredEventsLoggerSecret) { + logger.Info("events-logger-secret up to date") return ctrl.Result{}, nil } - logger.Info("grafana-agent-secret - updating") - err = r.Client.Update(ctx, &desiredGrafanaAgentSecret) + logger.Info("updating events-logger-secret") + err = r.Client.Update(ctx, &desiredEventsLoggerSecret) if err != nil { return ctrl.Result{}, errors.WithStack(err) } - logger.Info("grafana-agent-secret - done") + logger.Info("updated events-logger-secret") return ctrl.Result{}, nil } -// ReconcileDelete ensure grafana-agent-secret is deleted for the given cluster. +// ReconcileDelete - Not much to do here when a cluster is deleted func (r *Reconciler) ReconcileDelete(ctx context.Context, lc loggedcluster.Interface) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("grafana-agent-secret delete") + logger.Info("events-logger-secret delete") // Get expected secret. - var currentGrafanaAgentSecret v1.Secret - err := r.Client.Get(ctx, types.NamespacedName{Name: getGrafanaAgentSecretName(lc), Namespace: lc.GetAppsNamespace()}, &currentGrafanaAgentSecret) + var currentEventsLoggerSecret v1.Secret + err := r.Client.Get(ctx, types.NamespacedName{Name: GetEventsLoggerSecretName(lc), Namespace: lc.GetAppsNamespace()}, &currentEventsLoggerSecret) if err != nil { if apimachineryerrors.IsNotFound(err) { - logger.Info("grafana-agent-secret not found, stop here") + logger.Info("events-logger-secret not found, stop here") return ctrl.Result{}, nil } return ctrl.Result{}, errors.WithStack(err) } // Delete secret. 
- logger.Info("grafana-agent-secret deleting", "namespace", currentGrafanaAgentSecret.GetNamespace(), "name", currentGrafanaAgentSecret.GetName()) - err = r.Client.Delete(ctx, &currentGrafanaAgentSecret) + logger.Info("events-logger-secret deleting", "namespace", currentEventsLoggerSecret.GetNamespace(), "name", currentEventsLoggerSecret.GetName()) + err = r.Client.Delete(ctx, &currentEventsLoggerSecret) if err != nil { if apimachineryerrors.IsNotFound(err) { // Do no throw error in case it was not found, as this means // it was already deleted. - logger.Info("grafana-agent-secret already deleted") + logger.Info("events-logger-secret already deleted") return ctrl.Result{}, nil } return ctrl.Result{}, errors.WithStack(err) } - logger.Info("grafana-agent-secret deleted") + logger.Info("events-logger-secret deleted") return ctrl.Result{}, nil }