diff --git a/cmd/manager/main.go b/cmd/manager/main.go index fba83a755c..db9a5e9863 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -616,7 +616,13 @@ func startOperator(ctx context.Context) error { return err } - setDefaultSecurityContext, err := determineSetDefaultSecurityContext(viper.GetString(operator.SetDefaultSecurityContextFlag), clientset) + openshift, err := isOpenShift(clientset) + if err != nil { + log.Error(err, "failed to determine whether the operator is running within openshift") + return err + } + + setDefaultSecurityContext, err := determineSetDefaultSecurityContext(viper.GetString(operator.SetDefaultSecurityContextFlag), openshift) if err != nil { log.Error(err, "failed to determine how to set default security context") return err @@ -654,6 +660,7 @@ func startOperator(ctx context.Context) error { SetDefaultSecurityContext: setDefaultSecurityContext, ValidateStorageClass: viper.GetBool(operator.ValidateStorageClassFlag), Tracer: tracer, + IsOpenshift: openshift, } if viper.GetBool(operator.EnableWebhookFlag) { @@ -788,10 +795,9 @@ func chooseAndValidateIPFamily(ipFamilyStr string, ipFamilyDefault corev1.IPFami // 2. use OpenShift detection to determine whether or not we are running within an OpenShift cluster. // If we determine we are on an OpenShift cluster, and since OpenShift automatically sets security context, return false, // otherwise, return true as we'll need to set this security context on non-OpenShift clusters. 
-func determineSetDefaultSecurityContext(setDefaultSecurityContext string, clientset kubernetes.Interface) (bool, error) { +func determineSetDefaultSecurityContext(setDefaultSecurityContext string, openshift bool) (bool, error) { if setDefaultSecurityContext == "auto-detect" { - openshift, err := isOpenShift(clientset) - return !openshift, err + return !openshift, nil } return strconv.ParseBool(setDefaultSecurityContext) } @@ -827,7 +833,7 @@ func isOpenShift(clientset kubernetes.Interface) (bool, error) { } // We could not determine that we are running on an OpenShift cluster, - // so we will behave as if "setDefaultSecurityContext" was set to true. + // so return false. return false, nil } diff --git a/cmd/manager/main_test.go b/cmd/manager/main_test.go index e20921b697..3f30c38b6b 100644 --- a/cmd/manager/main_test.go +++ b/cmd/manager/main_test.go @@ -197,104 +197,63 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) { } } -func Test_determineSetDefaultSecurityContext(t *testing.T) { - type args struct { - setDefaultSecurityContext string - clientset kubernetes.Interface - } +func Test_isOpenShift(t *testing.T) { tests := []struct { - name string - args args - want bool - wantErr bool + name string + clientset kubernetes.Interface + want bool + wantErr bool }{ { - "auto-detect on OpenShift cluster does not set security context", - args{ - "auto-detect", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{ - { - GroupVersion: schema.GroupVersion{Group: "security.openshift.io", Version: "v1"}.String(), - APIResources: []metav1.APIResource{ - { - Name: "securitycontextconstraints", - }, + name: "on OpenShift cluster is detected properly", + clientset: newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{ + { + GroupVersion: schema.GroupVersion{Group: "security.openshift.io", Version: "v1"}.String(), + APIResources: []metav1.APIResource{ + { + Name: "securitycontextconstraints", }, }, - }, nil), - }, - false, - false, + }, + }, nil), + want: 
true, + wantErr: false, }, { - "auto-detect on OpenShift cluster, returning group discovery failed error for OpenShift security group+version, does not set security context", - args{ - "auto-detect", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, &discovery.ErrGroupDiscoveryFailed{ - Groups: map[schema.GroupVersion]error{ - {Group: "security.openshift.io", Version: "v1"}: nil, - }, - }), - }, - false, - false, + name: "on OpenShift cluster, returning group discovery failed error for OpenShift security group+version returns true", + clientset: newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, &discovery.ErrGroupDiscoveryFailed{ + Groups: map[schema.GroupVersion]error{ + {Group: "security.openshift.io", Version: "v1"}: nil, + }, + }), + want: true, + wantErr: false, }, { - "auto-detect on non-OpenShift cluster, returning not found error, sets security context", - args{ - "auto-detect", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, apierrors.NewNotFound(schema.GroupResource{ - Group: "security.openshift.io", - Resource: "none", - }, "fake")), - }, - true, - false, + name: "not on non-OpenShift cluster, returning not found error returns false", + clientset: newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, apierrors.NewNotFound(schema.GroupResource{ + Group: "security.openshift.io", + Resource: "none", + }, "fake")), + want: false, + wantErr: false, }, { - "auto-detect on non-OpenShift cluster, returning random error, returns error", - args{ - "auto-detect", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, fmt.Errorf("random error")), - }, - true, - true, - }, - { - "true set, returning no error, will set security context", - args{ - "true", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, nil), - }, - true, - false, - }, { - "false set, returning no error, will not set security context", - args{ - "false", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, nil), - }, 
- false, - false, - }, { - "invalid bool set, returns error", - args{ - "invalid", - newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, nil), - }, - false, - true, + name: "random error defaults to false and returns error", + clientset: newFakeK8sClientsetWithDiscovery([]*metav1.APIResourceList{}, fmt.Errorf("random error")), + want: false, + wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := determineSetDefaultSecurityContext(tt.args.setDefaultSecurityContext, tt.args.clientset) + got, err := isOpenShift(tt.clientset) if (err != nil) != tt.wantErr { - t.Errorf("determineSetDefaultSecurityContext() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("isOpenShift() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("determineSetDefaultSecurityContext() = %v, want %v", got, tt.want) + t.Errorf("isOpenShift() = %v, want %v", got, tt.want) } }) } @@ -337,3 +296,74 @@ func newFakeK8sClientsetWithDiscovery(resources []*metav1.APIResourceList, disco } return client } + +func Test_determineSetDefaultSecurityContext(t *testing.T) { + type args struct { + setDefaultSecurityContext string + openshift bool + } + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "on openshift with auto-detect set return false", + args: args{ + setDefaultSecurityContext: "auto-detect", + openshift: true, + }, + want: false, + wantErr: false, + }, + { + name: "not on openshift with auto-detect set return true", + args: args{ + setDefaultSecurityContext: "auto-detect", + openshift: false, + }, + want: true, + wantErr: false, + }, + { + name: "not on openshift with set-default-security-context set to true returns true", + args: args{ + setDefaultSecurityContext: "true", + openshift: false, + }, + want: true, + wantErr: false, + }, + { + name: "on openshift with set-default-security-context set to true returns true", + args: args{ + setDefaultSecurityContext: "true", + 
openshift: true, + }, + want: true, + wantErr: false, + }, + { + name: "invalid bool for set-default-security-context returns false and error", + args: args{ + setDefaultSecurityContext: "invalid", + openshift: false, + }, + want: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := determineSetDefaultSecurityContext(tt.args.setDefaultSecurityContext, tt.args.openshift) + if (err != nil) != tt.wantErr { + t.Errorf("determineSetDefaultSecurityContext() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("determineSetDefaultSecurityContext() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/config/recipes/elastic-agent/fleet-apm-integration.yaml b/config/recipes/elastic-agent/fleet-apm-integration.yaml index 2f2b73ecd6..e4bd97fc15 100644 --- a/config/recipes/elastic-agent/fleet-apm-integration.yaml +++ b/config/recipes/elastic-agent/fleet-apm-integration.yaml @@ -86,8 +86,6 @@ spec: spec: serviceAccountName: fleet-server automountServiceAccountToken: true - securityContext: - runAsUser: 0 --- apiVersion: agent.k8s.elastic.co/v1alpha1 kind: Agent @@ -102,10 +100,6 @@ spec: mode: fleet deployment: replicas: 1 - podTemplate: - spec: - securityContext: - runAsUser: 0 --- apiVersion: v1 kind: Service diff --git a/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml b/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml index cfd35f0871..0ac0db5f37 100644 --- a/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml +++ b/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml @@ -95,8 +95,6 @@ spec: spec: serviceAccountName: fleet-server automountServiceAccountToken: true - securityContext: - runAsUser: 0 --- apiVersion: agent.k8s.elastic.co/v1alpha1 kind: Agent @@ -114,8 +112,6 @@ spec: spec: serviceAccountName: elastic-agent automountServiceAccountToken: true - securityContext: - runAsUser: 0 containers: - name: agent 
volumeMounts: diff --git a/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml b/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml index 098466fecf..57a49277f7 100644 --- a/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml +++ b/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml @@ -79,8 +79,6 @@ spec: spec: serviceAccountName: fleet-server automountServiceAccountToken: true - securityContext: - runAsUser: 0 --- apiVersion: agent.k8s.elastic.co/v1alpha1 kind: Agent @@ -97,11 +95,7 @@ spec: podTemplate: spec: serviceAccountName: elastic-agent - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet automountServiceAccountToken: true - securityContext: - runAsUser: 0 --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/config/recipes/elastic-agent/kubernetes-integration.yaml b/config/recipes/elastic-agent/kubernetes-integration.yaml index b72664bcd5..f26d42ab2a 100644 --- a/config/recipes/elastic-agent/kubernetes-integration.yaml +++ b/config/recipes/elastic-agent/kubernetes-integration.yaml @@ -13,8 +13,6 @@ spec: serviceAccountName: elastic-agent containers: - name: agent - securityContext: - runAsUser: 0 env: - name: NODE_NAME valueFrom: diff --git a/config/recipes/elastic-agent/multi-output.yaml b/config/recipes/elastic-agent/multi-output.yaml index daa6d6d5ec..512ab1dfbf 100644 --- a/config/recipes/elastic-agent/multi-output.yaml +++ b/config/recipes/elastic-agent/multi-output.yaml @@ -14,8 +14,6 @@ spec: spec: containers: - name: agent - securityContext: - runAsUser: 0 volumeMounts: - mountPath: /var/log name: varlog diff --git a/config/recipes/elastic-agent/system-integration.yaml b/config/recipes/elastic-agent/system-integration.yaml index edd0a9853e..b1aae248c2 100644 --- a/config/recipes/elastic-agent/system-integration.yaml +++ b/config/recipes/elastic-agent/system-integration.yaml @@ -11,8 +11,6 @@ spec: spec: containers: - name: agent - securityContext: - runAsUser: 0 config: id: 
488e0b80-3634-11eb-8208-57893829af4e revision: 2 diff --git a/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc b/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc index 6af8c5b407..4d037b10ed 100644 --- a/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/agent-standalone.asciidoc @@ -32,11 +32,7 @@ spec: version: {version} elasticsearchRefs: - name: quickstart - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 <1> + daemonSet: {} config: inputs: - name: system-1 @@ -62,9 +58,6 @@ spec: period: 10s EOF ---- -+ -<1> The root user is required to persist state in a hostPath volume. See <<{p}_storing_local_state_in_host_path_volume>> on how to disable this feature. -+ Check <<{p}-elastic-agent-configuration-examples>> for more ready-to-use manifests. . Monitor the status of Elastic Agent. @@ -140,11 +133,7 @@ spec: version: {version} elasticsearchRefs: - name: quickstart - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 <1> + daemonSet: {} config: inputs: - name: system-1 @@ -170,8 +159,6 @@ spec: period: 10s ---- -<1> The root user is required to persist state in a hostPath volume. See <<{p}_storing_local_state_in_host_path_volume>> on how to disable this feature. - Alternatively, it can be provided through a Secret specified in the `configRef` element. 
The Secret must have an `agent.yml` entry with this configuration: [source,yaml,subs="attributes,+macros"] ---- @@ -183,11 +170,7 @@ spec: version: {version} elasticsearchRefs: - name: quickstart - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 + daemonSet: {} configRef: secretName: system-cpu-config --- @@ -239,11 +222,7 @@ metadata: name: quickstart spec: version: {version} - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 + daemonSet: {} elasticsearchRefs: - name: quickstart outputName: default @@ -285,11 +264,7 @@ metadata: name: quickstart spec: version: {version} - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 + daemonSet: {} config: outputs: default: @@ -316,11 +291,7 @@ metadata: name: quickstart spec: version: {version} - daemonSet: - podTemplate: - spec: - securityContext: - runAsUser: 0 + daemonSet: {} strategy: type: RollingUpdate rollingUpdate: @@ -350,8 +321,6 @@ spec: spec: automountServiceAccountToken: true serviceAccountName: elastic-agent - securityContext: - runAsUser: 0 ... --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/controller/agent/pod.go b/pkg/controller/agent/pod.go index 942e7504c9..6231f76df1 100644 --- a/pkg/controller/agent/pod.go +++ b/pkg/controller/agent/pod.go @@ -183,6 +183,7 @@ func buildPodTemplate(params Params, fleetCerts *certificates.CertificatesSecret WithDockerImage(spec.Image, container.ImageRepository(container.AgentImage, spec.Version)). WithAutomountServiceAccountToken(). WithVolumeLikes(vols...). + WithInitContainers(maybeAgentInitContainerForHostpathVolume(params, v)...). 
WithEnv( corev1.EnvVar{Name: "NODE_NAME", ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ @@ -357,13 +358,19 @@ func applyRelatedEsAssoc(agent agentv1alpha1.Agent, esAssociation commonv1.Assoc certificatesDir(esAssociation), )) - // Beats managed by the Elastic Agent don't trust the Elasticsearch CA that Elastic Agent itself is configured - // to trust. There is currently no way to configure those Beats to trust a particular CA. The intended way to handle - // it is to allow Fleet to provide Beat output settings, but due to https://github.com/elastic/kibana/issues/102794 - // this is not supported outside of UI. To workaround this limitation the Agent is going to update Pod-wide CA store - // before starting Elastic Agent. - cmd := trustCAScript(path.Join(certificatesDir(esAssociation), CAFileName)) - return builder.WithCommand([]string{"/usr/bin/env", "bash", "-c", cmd}), nil + v, err := version.Parse(agent.Spec.Version) + if err != nil { + return nil, err // error unlikely and should have been caught during validation + } + + // Agent prior to 7.14.0 did not respect the FLEET_CA environment variable and as such + // the Agent is going to update Pod-wide CA store before starting Elastic Agent. + // (https://github.com/elastic/beats/pull/26529) + if v.LT(version.MinFor(7, 14, 0)) { + cmd := trustCAScript(path.Join(certificatesDir(esAssociation), CAFileName)) + return builder.WithCommand([]string{"/usr/bin/env", "bash", "-c", cmd}), nil + } + return builder, nil } func writeEsAssocToConfigHash(params Params, esAssociation commonv1.Association, configHash hash.Hash) error { diff --git a/pkg/controller/agent/volume.go b/pkg/controller/agent/volume.go new file mode 100644 index 0000000000..d8219f14d8 --- /dev/null +++ b/pkg/controller/agent/volume.go @@ -0,0 +1,167 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package agent + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/blang/semver/v4" + + agentv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/agent/v1alpha1" + container "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/container" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/pointer" +) + +const ( + hostPathVolumeInitContainerName = "permissions" + chconCmd = "chcon -Rt svirt_sandbox_file_t /usr/share/elastic-agent/state" + permissionsCmdFmt = `#!/usr/bin/env bash + set -e + find /usr/share/elastic-agent -ls + if [[ -d /usr/share/elastic-agent/state ]]; then + %s + chmod g+rw /usr/share/elastic-agent/state + chgrp 1000 /usr/share/elastic-agent/state + if [ -n "$(ls -A /usr/share/elastic-agent/state 2>/dev/null)" ]; then + chgrp 1000 /usr/share/elastic-agent/state/* + chmod g+rw /usr/share/elastic-agent/state/* + fi + fi + ` +) + +var ( + hostPathVolumeInitContainerResources = corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceMemory: resource.MustParse("128Mi"), + corev1.ResourceCPU: resource.MustParse("100m"), + }, + } +) + +// maybeAgentInitContainerForHostpathVolume will return an init container that ensures that the host +// volume's permissions are sufficient for the Agent to maintain state if the Elastic Agent +// has the following attributes: +// +// 1. Agent volume is not set to emptyDir. +// 2. Agent version is above 7.15. +// 3. Agent spec is not configured to run as root. 
+func maybeAgentInitContainerForHostpathVolume(params Params, v semver.Version) (initContainers []corev1.Container) { + // Only add initContainer to chown hostpath data volume for Agent > 7.15 + if !v.GTE(version.MinFor(7, 15, 0)) { + return nil + } + + spec := ¶ms.Agent.Spec + + image := spec.Image + if image == "" { + image = container.ImageRepository(container.AgentImage, spec.Version) + } + + if !dataVolumeEmptyDir(spec) && !runningAsRoot(spec) { + container := corev1.Container{ + Image: image, + Command: hostPathVolumeInitContainerCommand(params.OperatorParams.IsOpenshift), + Name: hostPathVolumeInitContainerName, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(0), + }, + Resources: hostPathVolumeInitContainerResources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + } + if params.OperatorParams.IsOpenshift { + container.SecurityContext.Privileged = pointer.Bool(true) + } + initContainers = append(initContainers, container) + } + + return initContainers +} + +// hostPathVolumeInitContainerCommand returns the container command +// for maintaining permissions for Elastic Agent. +func hostPathVolumeInitContainerCommand(isOpenshift bool) []string { + command := []string{"/usr/bin/env", "bash", "-c"} + if isOpenshift { + command = append(command, fmt.Sprintf(permissionsCmdFmt, chconCmd)) + return command + } + command = append(command, fmt.Sprintf(permissionsCmdFmt, "")) + return command +} + +// runningAsRoot will return true if either the Daemonset or Deployment for +// Elastic Agent has a security context set where the container will run as root. 
+func runningAsRoot(spec *agentv1alpha1.AgentSpec) bool { + if spec.DaemonSet != nil { + templateSpec := spec.DaemonSet.PodTemplate.Spec + if templateSpec.SecurityContext != nil && + templateSpec.SecurityContext.RunAsUser != nil && *templateSpec.SecurityContext.RunAsUser == 0 { + return true + } + return containerRunningAsUser0(templateSpec) + } + if spec.Deployment != nil { + templateSpec := spec.Deployment.PodTemplate.Spec + if templateSpec.SecurityContext != nil && + templateSpec.SecurityContext.RunAsUser != nil && *templateSpec.SecurityContext.RunAsUser == 0 { + return true + } + return containerRunningAsUser0(templateSpec) + } + return false +} + +// containerRunningAsUser0 will return true if the Agent container +// has its container-level security context set to run as root. +func containerRunningAsUser0(spec corev1.PodSpec) bool { + for _, container := range spec.Containers { + if container.Name == "agent" { + if container.SecurityContext == nil { + return false + } + if container.SecurityContext.RunAsUser != nil && *container.SecurityContext.RunAsUser == 0 { + return true + } + return false + } + } + return false +} + +// dataVolumeEmptyDir will return true if either the Daemonset or Deployment for +// Elastic Agent has its Agent volume configured for EmptyDir. +func dataVolumeEmptyDir(spec *agentv1alpha1.AgentSpec) bool { + if spec.DaemonSet != nil { + return volumeIsEmptyDir(spec.DaemonSet.PodTemplate.Spec.Volumes) + } + if spec.Deployment != nil { + return volumeIsEmptyDir(spec.Deployment.PodTemplate.Spec.Volumes) + } + return false +} + +func volumeIsEmptyDir(vols []corev1.Volume) bool { + for _, vol := range vols { + if vol.Name == DataVolumeName && vol.VolumeSource.EmptyDir != nil { + return true + } + } + return false +} diff --git a/pkg/controller/agent/volume_test.go b/pkg/controller/agent/volume_test.go new file mode 100644 index 0000000000..9f7932fd84 --- /dev/null +++ b/pkg/controller/agent/volume_test.go @@ -0,0 +1,390 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package agent + +import ( + "testing" + + "github.com/blang/semver/v4" + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + agentv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/agent/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/operator" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/pointer" +) + +var ( + agentDeploymentFixture = agentv1alpha1.Agent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "agent", + Namespace: "test", + }, + Spec: agentv1alpha1.AgentSpec{ + Deployment: &agentv1alpha1.DeploymentSpec{}, + }, + } + + agentDaemonsetFixture = agentv1alpha1.Agent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "agent", + Namespace: "test", + }, + Spec: agentv1alpha1.AgentSpec{ + DaemonSet: &agentv1alpha1.DaemonSetSpec{}, + }, + } +) + +func Test_volumeIsEmptyDir(t *testing.T) { + tests := []struct { + name string + vols []corev1.Volume + want bool + }{ + { + name: "agent-data volume as EmptyDir is true", + vols: []corev1.Volume{ + { + Name: DataVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + want: true, + }, + { + name: "agent-data volume as Hostpath is false", + vols: []corev1.Volume{ + { + Name: DataVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{}, + }, + }, + }, + want: false, + }, + { + name: "random volume as EmptyDir is false", + vols: []corev1.Volume{ + { + Name: "random", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + want: false, + }, + { + name: "empty volumes is false", + vols: []corev1.Volume{}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
volumeIsEmptyDir(tt.vols); got != tt.want { + t.Errorf("volumeIsEmptyDir() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_dataVolumeEmptyDir(t *testing.T) { + tests := []struct { + name string + spec *agentv1alpha1.AgentSpec + want bool + }{ + { + name: "agent with deployment and no volumes defaults to false", + spec: &agentDeploymentFixture.Spec, + want: false, + }, + { + name: "agent with daemonset and no volumes defaults to false", + spec: &agentDaemonsetFixture.Spec, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := dataVolumeEmptyDir(tt.spec); got != tt.want { + t.Errorf("dataVolumeEmptyDir() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_containerRunningAsUser0(t *testing.T) { + tests := []struct { + name string + spec corev1.PodSpec + want bool + }{ + { + name: "empty pod spec returns false", + spec: corev1.PodSpec{}, + want: false, + }, + { + name: "agent container in pod spec with no security context returns false", + spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "agent", + }, + }, + }, + want: false, + }, + { + name: "agent container in pod spec security context and RunAsUser nil returns false", + spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "agent", + SecurityContext: &corev1.SecurityContext{ + RunAsUser: nil, + }, + }, + }, + }, + want: false, + }, + { + name: "agent container in pod spec set to run as user 1000 returns false", + spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "agent", + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(1000), + }, + }, + }, + }, + want: false, + }, + { + name: "agent container in pod spec set to run as user 0 returns true", + spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "agent", + SecurityContext: &corev1.SecurityContext{ + RunAsUser: pointer.Int64(0), + }, + }, + }, + }, + want: true, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + if got := containerRunningAsUser0(tt.spec); got != tt.want { + t.Errorf("containerRunningAsUser0() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_runningAsRoot(t *testing.T) { + tests := []struct { + name string + spec *agentv1alpha1.AgentSpec + want bool + }{ + { + name: "daemonset with no security context returns false", + spec: &agentDaemonsetFixture.Spec, + want: false, + }, + { + name: "daemonset with security context no runAsUser returns false", + spec: withSecurityContext(agentDaemonsetFixture.Spec, &corev1.PodSecurityContext{}), + want: false, + }, + { + name: "daemonset with security context runAsUser 1000 returns false", + spec: withSecurityContext(agentDaemonsetFixture.Spec, &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(1000), + }), + want: false, + }, + { + name: "daemonset with security context runAsUser 0 returns true", + spec: withSecurityContext(agentDaemonsetFixture.Spec, &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(0), + }), + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := runningAsRoot(tt.spec); got != tt.want { + t.Errorf("runningAsRoot() = %v, want %v", got, tt.want) + } + }) + } +} + +func withSecurityContext(spec agentv1alpha1.AgentSpec, sec *corev1.PodSecurityContext) *agentv1alpha1.AgentSpec { + agentSpec := spec.DeepCopy() + if agentSpec.DaemonSet != nil { + agentSpec.DaemonSet.PodTemplate.Spec.SecurityContext = sec + return agentSpec + } + if agentSpec.Deployment != nil { + agentSpec.Deployment.PodTemplate.Spec.SecurityContext = sec + return agentSpec + } + return agentSpec +} + +func withVersion(spec agentv1alpha1.AgentSpec, version string) agentv1alpha1.AgentSpec { + agentSpec := spec.DeepCopy() + agentSpec.Version = version + return *agentSpec +} + +func Test_maybeAgentInitContainerForHostpathVolume(t *testing.T) { + type args struct { + params Params + v semver.Version + } + tests := []struct { + name string + args 
args + wantInitContainers []corev1.Container + }{ + { + name: "version 7.14 does not add init container", + args: args{ + params: Params{ + Agent: agentv1alpha1.Agent{ + Spec: withVersion(agentDaemonsetFixture.Spec, "7.14.0"), + }, + }, + v: semver.MustParse("7.14.0"), + }, + wantInitContainers: nil, + }, + { + name: "version 8.5.0 adds init container", + args: args{ + params: Params{ + Agent: agentv1alpha1.Agent{ + Spec: withVersion(agentDaemonsetFixture.Spec, "8.5.0"), + }, + }, + v: semver.MustParse("8.5.0"), + }, + wantInitContainers: []corev1.Container{ + { + Image: "docker.elastic.co/beats/elastic-agent:8.5.0", + Command: hostPathVolumeInitContainerCommand(false), + Name: hostPathVolumeInitContainerName, + SecurityContext: &corev1.SecurityContext{RunAsUser: pointer.Int64(0)}, + Resources: hostPathVolumeInitContainerResources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + { + name: "version 8.5.0 on openshift adds init container with privileged: true", + args: args{ + params: Params{ + Agent: agentv1alpha1.Agent{ + Spec: withVersion(agentDaemonsetFixture.Spec, "8.5.0"), + }, + OperatorParams: operator.Parameters{ + IsOpenshift: true, + }, + }, + v: semver.MustParse("8.5.0"), + }, + wantInitContainers: []corev1.Container{ + { + Image: "docker.elastic.co/beats/elastic-agent:8.5.0", + Command: hostPathVolumeInitContainerCommand(true), + Name: hostPathVolumeInitContainerName, + SecurityContext: &corev1.SecurityContext{RunAsUser: pointer.Int64(0), Privileged: pointer.Bool(true)}, + Resources: hostPathVolumeInitContainerResources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: DataVolumeName, + MountPath: DataMountPath, + }, + }, + }, + }, + }, + { + name: "version 8.5.0 with Emptydir Volume adds no init container", + args: args{ + params: Params{ + Agent: agentv1alpha1.Agent{ + Spec: agentv1alpha1.AgentSpec{ + DaemonSet: &agentv1alpha1.DaemonSetSpec{ + PodTemplate: corev1.PodTemplateSpec{ 
+ Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: DataVolumeName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + }, + }, + }, + }, + }, + }, + }, + v: semver.MustParse("8.5.0"), + }, + wantInitContainers: nil, + }, + { + name: "version 8.5.0 running as root adds no init container", + args: args{ + params: Params{ + Agent: agentv1alpha1.Agent{ + Spec: agentv1alpha1.AgentSpec{ + DaemonSet: &agentv1alpha1.DaemonSetSpec{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: pointer.Int64(0), + }, + }, + }, + }, + }, + }, + }, + v: semver.MustParse("8.5.0"), + }, + wantInitContainers: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotInitContainers := maybeAgentInitContainerForHostpathVolume(tt.args.params, tt.args.v); !cmp.Equal(gotInitContainers, tt.wantInitContainers) { + t.Errorf("maybeAgentInitContainerForHostpathVolume() diff: %s", cmp.Diff(gotInitContainers, tt.wantInitContainers)) + } + }) + } +} diff --git a/pkg/controller/common/operator/parameters.go b/pkg/controller/common/operator/parameters.go index 2bee088fbd..3d596d17c1 100644 --- a/pkg/controller/common/operator/parameters.go +++ b/pkg/controller/common/operator/parameters.go @@ -49,4 +49,7 @@ type Parameters struct { ValidateStorageClass bool // Tracer is a shared APM tracer instance or nil Tracer *apm.Tracer + // IsOpenshift determines whether the operator is running within an Openshift + // environment in order to make configuration decisions. + IsOpenshift bool } diff --git a/pkg/utils/pointer/bool.go b/pkg/utils/pointer/bool.go new file mode 100644 index 0000000000..42c530eaea --- /dev/null +++ b/pkg/utils/pointer/bool.go @@ -0,0 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package pointer + +func Bool(b bool) *bool { return &b } diff --git a/test/e2e/global_ca_test.go b/test/e2e/global_ca_test.go index ea343736b4..21dfe547ba 100644 --- a/test/e2e/global_ca_test.go +++ b/test/e2e/global_ca_test.go @@ -35,8 +35,9 @@ func TestGlobalCA(t *testing.T) { // Skip if it is the resilience pipeline because the ChaosJob can prevent // assert_operator_has_been_restarted_once_more to pass when it deletes an operator Pod - // exactly on restart. - if test.Ctx().Pipeline == "e2e/resilience" { + // exactly on restart. Also skip if running tests locally as the operator + // namespace, and the operator configmap is not present. + if test.Ctx().Pipeline == "e2e/resilience" || test.Ctx().Local { t.Skip() }