diff --git a/.ci/jobs/e2e-custom.yml b/.ci/jobs/e2e-custom.yml
new file mode 100644
index 0000000000..146f5c1b17
--- /dev/null
+++ b/.ci/jobs/e2e-custom.yml
@@ -0,0 +1,30 @@
+---
+- job:
+    description: Job that runs e2e tests against a custom ECK image in a dedicated k8s cluster in GKE. This job is managed by JJB.
+    logrotate:
+      daysToKeep: 7
+      numToKeep: 100
+      artifactDaysToKeep: 5
+      artifactNumToKeep: 10
+    name: cloud-on-k8s-e2e-tests-custom
+    project-type: pipeline
+    parameters:
+      - string:
+          name: IMAGE
+          description: "Docker image with ECK"
+      - string:
+          name: VERSION
+          default: 1.12
+          description: "Kubernetes version, default is 1.12"
+    concurrent: true
+    pipeline-scm:
+      scm:
+        - git:
+            url: https://github.com/elastic/cloud-on-k8s
+            branches:
+              - master
+            credentials-id: 'f6c7695a-671e-4f4f-a331-acdce44ff9ba'
+      script-path: build/ci/e2e/custom_operator_image.jenkinsfile
+      lightweight-checkout: true
+    wrappers:
+      - ansicolor
diff --git a/build/ci/Makefile b/build/ci/Makefile
index a0ec04ea30..c329b1a404 100644
--- a/build/ci/Makefile
+++ b/build/ci/Makefile
@@ -97,7 +97,8 @@ ci-release: vault-public-key vault-docker-creds
 		-e "REPOSITORY=$(REPOSITORY)" \
 		-e "ELASTIC_DOCKER_LOGIN=$(DOCKER_LOGIN)" \
 		-e "ELASTIC_DOCKER_PASSWORD=$(shell cat $(DOCKER_CREDENTIALS_FILE))" \
-		-e "RELEASE=true" \
+		-e "USE_ELASTIC_DOCKER_REGISTRY=true" \
+		-e "SNAPSHOT_RELEASE=$(SNAPSHOT_RELEASE)" \
 		cloud-on-k8s-ci-release \
 		bash -c "make -C operators ci-release"

@@ -130,7 +131,26 @@ ci-e2e: vault-gke-creds
 		-e "TESTS_MATCH=$(TESTS_MATCH)" \
 		-e "GKE_CLUSTER_VERSION=$(GKE_CLUSTER_VERSION)" \
 		cloud-on-k8s-ci-e2e \
-		bash -c "make -C operators ci-e2e GKE_MACHINE_TYPE=n1-standard-8"
+		bash -c "make -C operators ci-e2e"
+
+# Run e2e tests in GKE against a provided ECK image
+ci-e2e-rc: vault-gke-creds
+	docker build -f Dockerfile -t cloud-on-k8s-ci-e2e .
+	docker run --rm -t \
+		-v /var/run/docker.sock:/var/run/docker.sock \
+		-v $(ROOT_DIR):$(GO_MOUNT_PATH) \
+		-w $(GO_MOUNT_PATH) \
+		-e "IMG_SUFFIX=-ci" \
+		-e "GCLOUD_PROJECT=$(GCLOUD_PROJECT)" \
+		-e "REGISTRY=$(REGISTRY)" \
+		-e "REPOSITORY=$(GCLOUD_PROJECT)" \
+		-e "GKE_CLUSTER_NAME=$(GKE_CLUSTER_NAME)" \
+		-e "GKE_SERVICE_ACCOUNT_KEY_FILE=$(GO_MOUNT_PATH)/build/ci/$(GKE_CREDS_FILE)" \
+		-e "TESTS_MATCH=$(TESTS_MATCH)" \
+		-e "GKE_CLUSTER_VERSION=$(GKE_CLUSTER_VERSION)" \
+		-e "OPERATOR_IMAGE=$(OPERATOR_IMAGE)" \
+		cloud-on-k8s-ci-e2e \
+		bash -c "make -C operators ci-e2e-rc"

 # Remove k8s cluster
 ci-e2e-delete-cluster: vault-gke-creds
@@ -145,10 +165,22 @@ ci-e2e-delete-cluster: vault-gke-creds
 		cloud-on-k8s-ci-e2e \
 		bash -c "make -C operators set-context-gke delete-gke"

+# Remove all unused resources in GKE
+ci-gke-cleanup: ci-e2e-delete-cluster
+	docker run --rm -t \
+		-v $(ROOT_DIR):$(GO_MOUNT_PATH) \
+		-w $(GO_MOUNT_PATH) \
+		-e "GCLOUD_PROJECT=$(GCLOUD_PROJECT)" \
+		-e "GKE_CLUSTER_NAME=$(GKE_CLUSTER_NAME)" \
+		-e "GKE_SERVICE_ACCOUNT_KEY_FILE=$(GO_MOUNT_PATH)/build/ci/$(GKE_CREDS_FILE)" \
+		cloud-on-k8s-ci-e2e \
+		bash -c "GKE_CLUSTER_VERSION=1.11 $(GO_MOUNT_PATH)/operators/hack/gke-cluster.sh auth && \
+		$(GO_MOUNT_PATH)/build/ci/delete_unused_disks.py"
+
 # Run docs build
 ci-build-docs:
 	docker run --rm -t \
-	  -v $(ROOT_DIR):$(GO_MOUNT_PATH) \
+		-v $(ROOT_DIR):$(GO_MOUNT_PATH) \
 		docker.elastic.co/docs/build:1 \
 		bash -c "git clone https://github.com/elastic/docs.git && \
 		/docs/build_docs.pl --doc $(GO_MOUNT_PATH)/docs/index.asciidoc --out $(GO_MOUNT_PATH)/docs/html --chunk 1 && \
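The new `ci-gke-cleanup` target chains cluster deletion with the `delete_unused_disks.py` script added below. As an illustration only — not part of this patch — here is the same cleanup logic as a self-contained sketch in the operator's language, Go. It assumes `gcloud` is on the PATH and `GCLOUD_PROJECT` is set, mirroring the Python script that follows:

```go
// disk_cleanup.go - illustrative Go equivalent of delete_unused_disks.py.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path"
)

// disk mirrors the two fields of `gcloud compute disks list --format=json`
// output that the cleanup needs.
type disk struct {
	Name string `json:"name"`
	Zone string `json:"zone"` // full resource URL; the zone name is its last segment
}

func main() {
	project := os.Getenv("GCLOUD_PROJECT")
	out, err := exec.Command("gcloud", "compute", "disks", "list",
		"--filter=-users:*", "--format=json", "--project", project).Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "gcloud list failed:", err)
		os.Exit(1)
	}
	var disks []disk
	if err := json.Unmarshal(out, &disks); err != nil {
		fmt.Fprintf(os.Stderr, "can't parse JSON: %v\n%s\n", err, out)
		os.Exit(1)
	}
	if len(disks) == 0 {
		fmt.Println("There are no unused disks. Congratulations!")
		return
	}
	for _, d := range disks {
		zone := path.Base(d.Zone) // ".../zones/europe-west3-a" -> "europe-west3-a"
		cmd := exec.Command("gcloud", "compute", "disks", "delete", d.Name,
			"--project", project, "--zone", zone, "--quiet")
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		if err := cmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "failed to delete disk %s: %v\n", d.Name, err)
		}
	}
}
```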
diff --git a/build/ci/delete_unused_disks.py b/build/ci/delete_unused_disks.py
new file mode 100755
index 0000000000..c90249acfd
--- /dev/null
+++ b/build/ci/delete_unused_disks.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+import os
+import json
+
+project = os.environ['GCLOUD_PROJECT']
+
+os.system('gcloud compute disks list --filter="-users:*" --format="json" --project {} > unused_disks.json'
+          .format(project))
+
+with open('unused_disks.json', 'r') as f:
+    content = f.read()
+    try:
+        parsed_json_dict = json.loads(content)
+        if len(parsed_json_dict) == 0:
+            print("There are no unused disks. Congratulations!")
+        else:
+            for entry in parsed_json_dict:
+                name = entry['name']
+                head, tail = os.path.split(entry['zone'])
+                os.system('gcloud compute disks delete {} --project {} --zone {} --quiet'
+                          .format(name, project, tail))
+    except ValueError:
+        print("Can't parse JSON:")
+        print(content)
diff --git a/build/ci/e2e/GKE_k8s_versions.jenkinsfile b/build/ci/e2e/GKE_k8s_versions.jenkinsfile
index 18742e7dbe..c917d409f1 100644
--- a/build/ci/e2e/GKE_k8s_versions.jenkinsfile
+++ b/build/ci/e2e/GKE_k8s_versions.jenkinsfile
@@ -76,7 +76,7 @@ pipeline {
                 for (int i = 0; i < clusters.size(); i++) {
                     sh """
                         export GKE_CLUSTER_NAME=${clusters[i]}
-                        make -C build/ci ci-e2e-delete-cluster
+                        make -C build/ci ci-gke-cleanup
                     """
                 }
             }
diff --git a/build/ci/e2e/Jenkinsfile b/build/ci/e2e/Jenkinsfile
index a8c4481bae..e24318dae6 100644
--- a/build/ci/e2e/Jenkinsfile
+++ b/build/ci/e2e/Jenkinsfile
@@ -43,7 +43,7 @@ pipeline {
            }
        }
        cleanup {
-           sh 'make -C build/ci ci-e2e-delete-cluster'
+           sh 'make -C build/ci ci-gke-cleanup'
            cleanWs()
        }
    }
diff --git a/build/ci/e2e/custom_operator_image.jenkinsfile b/build/ci/e2e/custom_operator_image.jenkinsfile
new file mode 100644
index 0000000000..7bbfe0af60
--- /dev/null
+++ b/build/ci/e2e/custom_operator_image.jenkinsfile
@@ -0,0 +1,53 @@
+pipeline {
+
+    agent {
+        label 'linux'
+    }
+
+    options {
+        timeout(time: 150, unit: 'MINUTES')
+    }
+
+    environment {
+        VAULT_ADDR = credentials('vault-addr')
+        VAULT_ROLE_ID = credentials('vault-role-id')
+        VAULT_SECRET_ID = credentials('vault-secret-id')
+        REGISTRY = "eu.gcr.io"
+        GCLOUD_PROJECT = credentials('k8s-operators-gcloud-project')
+        GKE_CLUSTER_VERSION = "${VERSION}"
+        GKE_CLUSTER_NAME = "${BUILD_TAG}"
+        OPERATOR_IMAGE = "${IMAGE}"
+        LATEST_RELEASED_IMG = "${IMAGE}"
+    }
+
+    stages {
+        stage('Checkout from GitHub') {
+            steps {
+                checkout scm
+            }
+        }
+        stage("Run E2E tests") {
+            steps {
+                sh 'make -C build/ci ci-e2e-rc'
+            }
+        }
+    }
+
+    post {
+        unsuccessful {
+            script {
+                def msg = "E2E tests failed!\r\n" + env.BUILD_URL
+                slackSend botUser: true,
+                    channel: '#cloud-k8s',
+                    color: 'danger',
+                    message: msg,
+                    tokenCredentialId: 'cloud-ci-slack-integration-token'
+            }
+        }
+        cleanup {
+            sh 'make -C build/ci ci-e2e-delete-cluster'
+            cleanWs()
+        }
+    }
+
+}
diff --git a/build/ci/pr/Jenkinsfile b/build/ci/pr/Jenkinsfile
index ea56930e7e..2aca2b1d45 100644
--- a/build/ci/pr/Jenkinsfile
+++ b/build/ci/pr/Jenkinsfile
@@ -23,11 +23,16 @@ pipeline {
         stage('Run tests in parallel') {
             parallel {
                 stage("Run unit and integration tests") {
+                    when {
+                        expression {
+                            checkout scm
+                            notOnlyDocs()
+                        }
+                    }
                     agent {
                         label 'linux'
                     }
                     steps {
-                        checkout scm
                         sh 'make -C build/ci ci-pr'
                     }
                 }
@@ -38,11 +43,16 @@
                     }
                 }
                 stage("Run smoke E2E tests") {
+                    when {
+                        expression {
+                            checkout scm
+                            notOnlyDocs()
+                        }
+                    }
                     agent {
                         label 'linux'
                     }
                     steps {
-                        checkout scm
                         sh 'make -C build/ci ci-e2e'
                     }
                 }
@@ -51,10 +61,33 @@
    }

    post {
+       success {
+           withEnv([
+               'REGISTRY=push.docker.elastic.co',
+               'REPOSITORY=eck-snapshots',
+               'IMG_SUFFIX=',
+               'SNAPSHOT_RELEASE=true',
+               "TAG_NAME=${ghprbPullId}"
+           ]) {
+               sh 'make -C build/ci ci-release'
+           }
+       }
        cleanup {
-           sh 'make -C build/ci ci-e2e-delete-cluster'
+           script {
+               if (notOnlyDocs()) {
+                   sh 'make -C build/ci ci-gke-cleanup'
+               }
+           }
            cleanWs()
        }
    }
}
+
+def notOnlyDocs() {
+    // grep succeeds if there is at least one line without docs/
+    return sh (
+        script: "git diff --name-status HEAD~1 HEAD | grep -v docs/",
+        returnStatus: true
+    ) == 0
+}
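The `notOnlyDocs()` helper above treats a PR as docs-only when every line of `git diff --name-status HEAD~1 HEAD` matches `docs/`. For illustration, a rough, self-contained Go equivalent of that check (not part of this patch; the path handling is simplified and ignores rename entries, which carry an extra column):

```go
// notonlydocs.go - illustrative Go equivalent of the Jenkinsfile helper:
// report whether the last commit touched anything outside docs/.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func notOnlyDocs() (bool, error) {
	out, err := exec.Command("git", "diff", "--name-status", "HEAD~1", "HEAD").Output()
	if err != nil {
		return false, err
	}
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		// Each line looks like "M\tdocs/index.asciidoc"; keep the path part.
		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		if !strings.Contains(fields[1], "docs/") {
			return true, nil // at least one non-docs change
		}
	}
	return false, nil
}

func main() {
	ok, err := notOnlyDocs()
	if err != nil {
		panic(err)
	}
	fmt.Println("contains non-docs changes:", ok)
}
```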
diff --git a/build/ci/release/Jenkinsfile b/build/ci/release/Jenkinsfile
index 27322e769d..db7ecd5905 100644
--- a/build/ci/release/Jenkinsfile
+++ b/build/ci/release/Jenkinsfile
@@ -13,6 +13,7 @@ pipeline {
        IMG_NAME = 'eck-operator'
        IMG_SUFFIX = ''
        LATEST_RELEASED_IMG = "docker.elastic.co/${REPOSITORY}/${IMG_NAME}:${TAG_NAME}"
+       SNAPSHOT_RELEASE = 'false'
    }

    options {
diff --git a/docs/elasticsearch-spec.asciidoc b/docs/elasticsearch-spec.asciidoc
index 7b1c435c33..ac826c98eb 100644
--- a/docs/elasticsearch-spec.asciidoc
+++ b/docs/elasticsearch-spec.asciidoc
@@ -100,3 +100,141 @@ Example to create a Kubernetes TLS secret with a self-signed certificate:
 $ openssl req -x509 -newkey rsa:4096 -keyout tls.key -out tls.crt -days 365 -nodes
 $ kubectl create secret tls my-cert --cert tls.crt --key tls.key
 ----
+
+[id="{p}-update-strategy"]
+=== Update strategy
+
+The Elasticsearch cluster configuration can be updated at any time to:
+
+* add new nodes
+* remove some nodes
+* change the Elasticsearch configuration
+* change pod resources (memory limits, CPU limits, environment variables, etc.)
+
+On any change, ECK reconciles Kubernetes resources towards the desired cluster definition. Changes are applied in a rolling fashion: the state of the cluster is continuously monitored to allow new nodes to be added and deprecated nodes to be removed.
+
+[id="{p}-change-budget"]
+==== Change budget
+
+No downtime should be expected when the cluster topology changes. Shards on deprecated nodes are migrated away so the nodes can be safely removed.
+
+For example, to mutate a 3-node cluster with a 16GB memory limit on each node into a 3-node cluster with a 32GB memory limit on each node, ECK will:
+
+1. add a new 32GB node: the cluster temporarily has 4 nodes
+2. migrate data away from the first 16GB node
+3. once data is migrated, remove the first 16GB node
+4. follow the same steps for the 2 other 16GB nodes
+
+The cluster health stays green during the entire process.
+By default, only one extra node can be added on top of the expected ones. In the example above, a 3-node cluster may temporarily be composed of 4 nodes while data migration is in progress.
+
+This behaviour can be controlled through the `changeBudget` section of the cluster specification's `updateStrategy`. If not specified, it defaults to the following:
+
+[source,yaml]
+----
+spec:
+  updateStrategy:
+    changeBudget:
+      maxSurge: 1
+      maxUnavailable: 0
+----
+
+* `maxSurge` specifies the number of pods that can be added to the cluster, on top of the desired number of nodes in the spec, during cluster updates
+* `maxUnavailable` specifies the number of pods that can be made unavailable during cluster updates
+
+The default of `maxSurge: 1; maxUnavailable: 0` spins up one additional Elasticsearch node during cluster updates (a sketch of this bookkeeping follows this diff).
+It is possible to speed up cluster topology changes by increasing `maxSurge`. For example, setting `maxSurge: 3` would allow 3 new nodes to be created while the original 3 migrate data in parallel.
+The cluster would then temporarily have 6 nodes.
+
+Setting `maxSurge` to 0 and `maxUnavailable` to a positive value caps the total number of pods at the desired node count, so nodes are removed before their replacements are created.
+For example, `maxSurge: 0; maxUnavailable: 1` would perform the 3-node upgrade this way:
+
+1. migrate data away from the first 16GB node
+2. once data is migrated, remove the 16GB node: the cluster temporarily has 2 nodes
+3. add a new 32GB node: the cluster grows back to 3 nodes
+4. follow the same steps for the 2 other 16GB nodes
+
+Even though any `changeBudget` can be specified, ECK will make sure some invariants are respected while a mutation is in progress:
+
+* there must be at least one master node alive in the cluster
+* there must be at least one data node alive in the cluster
+
+Under certain circumstances, ECK will therefore ignore the change budget. For example, a safe migration from a 1-node cluster to another 1-node cluster can only be done by temporarily setting up a 2-node cluster.
+
+It is possible to configure the `changeBudget` to optimize for reusing Persistent Volumes instead of migrating data across nodes. This feature is not supported yet: more details to come in the next release.
+
+[id="{p}-group-definitions"]
+==== Group definitions
+
+To optimize upgrades for highly available setups, ECK can take arbitrary node groupings into account. It prioritizes recovery of entire availability zones in catastrophic scenarios.
+
+For example, let's create a zone-aware Elasticsearch cluster. Some nodes will be created in `europe-west3-a`, and some others in `europe-west3-b`:
+
+[source,yaml]
+----
+apiVersion: elasticsearch.k8s.elastic.co/v1alpha1
+kind: Elasticsearch
+metadata:
+  name: quickstart
+spec:
+  version: 7.1.0
+  nodes:
+  - nodeCount: 3
+    config:
+      node.attr.zone: europe-west3-a
+      cluster.routing.allocation.awareness.attributes: zone
+    podTemplate:
+      meta:
+        labels:
+          nodesGroup: group-a
+      spec:
+        affinity:
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+              - matchExpressions:
+                - key: failure-domain.beta.kubernetes.io/zone
+                  operator: In
+                  values:
+                  - europe-west3-a
+  - nodeCount: 3
+    config:
+      node.attr.zone: europe-west3-b
+      cluster.routing.allocation.awareness.attributes: zone
+    podTemplate:
+      meta:
+        labels:
+          nodesGroup: group-b
+      spec:
+        affinity:
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+              - matchExpressions:
+                - key: failure-domain.beta.kubernetes.io/zone
+                  operator: In
+                  values:
+                  - europe-west3-b
+  updateStrategy:
+    changeBudget:
+      maxSurge: 1
+      maxUnavailable: 0
+    groups:
+    - selector:
+        matchLabels:
+          nodesGroup: group-a
+    - selector:
+        matchLabels:
+          nodesGroup: group-b
+----
+
+If a modification is applied to the Elasticsearch configuration of these 6 nodes, ECK will slowly upgrade the cluster nodes, taking the provided `changeBudget` into account.
+In this example, it will spawn one additional node at a time, and migrate data away from one node at a time.
+
+Imagine a catastrophic situation occurs while the mutation is in progress: all nodes in `europe-west3-b` suddenly disappear.
+ECK will detect it, and recreate the 3 missing nodes as expected. However, since a cluster upgrade is already in progress, the current `changeBudget` may already be maxed out, preventing new nodes from being created in `europe-west3-b`.
+
+In this situation, it would be preferable to first recreate the missing nodes in `europe-west3-b`, then continue the cluster upgrade.
+
+In order to do so, ECK must know about the logical grouping of nodes. Since this is an arbitrary setting (it can represent availability zones, but also node roles, hot-warm topologies, etc.), it must be specified in the `updateStrategy.groups` section of the Elasticsearch specification.
+Node grouping is expressed through labels on the resources. In the example above, 3 pods are labeled with `group-a`, and the 3 other pods with `group-b`.
\ No newline at end of file
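A minimal sketch of the `changeBudget` bookkeeping described in the docs above — not ECK's actual implementation, just the arithmetic implied by `maxSurge` and `maxUnavailable`:

```go
// changebudget.go - a minimal sketch (assumptions: names and methods are
// illustrative, not ECK's real types) of how a changeBudget bounds pod
// creations and deletions during a rolling mutation.
package main

import "fmt"

type ChangeBudget struct {
	MaxSurge       int // pods allowed above the desired node count
	MaxUnavailable int // pods allowed below the desired node count
}

// creationsAllowed returns how many new pods may be created right now.
func (b ChangeBudget) creationsAllowed(desired, current int) int {
	allowed := desired + b.MaxSurge - current
	if allowed < 0 {
		return 0
	}
	return allowed
}

// deletionsAllowed returns how many pods may be taken down right now.
func (b ChangeBudget) deletionsAllowed(desired, ready int) int {
	allowed := ready - (desired - b.MaxUnavailable)
	if allowed < 0 {
		return 0
	}
	return allowed
}

func main() {
	// Default budget: one extra pod, zero unavailable.
	b := ChangeBudget{MaxSurge: 1, MaxUnavailable: 0}
	fmt.Println(b.creationsAllowed(3, 3)) // 1: a 3-node cluster may grow to 4
	fmt.Println(b.deletionsAllowed(3, 3)) // 0: no node goes down before its replacement is ready
}
```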
diff --git a/docs/k8s-quickstart.asciidoc b/docs/k8s-quickstart.asciidoc
index 3d4f4e239a..bbaf0c274b 100644
--- a/docs/k8s-quickstart.asciidoc
+++ b/docs/k8s-quickstart.asciidoc
@@ -22,6 +22,8 @@ Make sure that you have link:https://kubernetes.io/docs/tasks/tools/install-kube

 NOTE: If you are using GKE, make sure your user has `cluster-admin` permissions. For more information, see link:https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap[Prerequisites for using Kubernetes RBAC on GKE].

+NOTE: If you are using Amazon EKS, make sure the Kubernetes control plane is allowed to communicate with the nodes on port 443. This is required for communication with the Validating Webhook. For more information, see link:https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html[Recommended inbound traffic].
+
 . Install link:https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/[custom resource definitions] and the operator with its RBAC rules:
 +
 [source,sh]
diff --git a/operators/Makefile b/operators/Makefile
index 50ec5fcc09..e83c756545 100644
--- a/operators/Makefile
+++ b/operators/Makefile
@@ -38,7 +38,10 @@ endif
 IMG_SUFFIX ?= -$(subst _,,$(USER))
 IMG ?= $(REGISTRY)/$(REPOSITORY)/$(NAME)$(IMG_SUFFIX)
 TAG ?= $(shell git rev-parse --short --verify HEAD)
-OPERATOR_IMAGE ?= $(IMG):$(TAG)
+OPERATOR_IMAGE ?= $(IMG):$(VERSION)-$(TAG)
+ifeq ($(SNAPSHOT_RELEASE), false)
+	OPERATOR_IMAGE = $(IMG):$(TAG)
+endif
 OPERATOR_IMAGE_LATEST ?= $(IMG):latest

 GO_LDFLAGS := -X github.com/elastic/cloud-on-k8s/operators/pkg/about.version=$(VERSION) \
@@ -46,6 +49,9 @@ GO_LDFLAGS := -X github.com/elastic/cloud-on-k8s/operators/pkg/about.version=$(V
 	-X github.com/elastic/cloud-on-k8s/operators/pkg/about.buildDate=$(shell date -u +'%Y-%m-%dT%H:%M:%SZ') \
 	-X github.com/elastic/cloud-on-k8s/operators/pkg/about.buildSnapshot=$(SNAPSHOT)

+# Setting for CI. If set to true, it prevents building and pushing the local Docker image.
+SKIP_DOCKER_COMMAND ?= false
+
 ## -- Namespaces

 # namespace in which the global operator is deployed (see config/global-operator)
@@ -146,7 +152,11 @@ endif
 endif

 # Deploy both the global and namespace operators against the current k8s cluster
-deploy: check-gke install-crds docker-build docker-push apply-operators
+deploy: check-gke install-crds
+ifeq ($(SKIP_DOCKER_COMMAND), false)
+	$(MAKE) docker-build docker-push
+endif
+	$(MAKE) apply-operators

 apply-operators:
 	OPERATOR_IMAGE=$(OPERATOR_IMAGE) \
@@ -235,8 +245,10 @@ bootstrap-gke: require-gcloud-project
 ifeq ($(PSP), 1)
 	kubectl apply -f config/dev/elastic-psp.yaml
 endif
+ifeq ($(SKIP_DOCKER_COMMAND), false)
 # push "latest" operator image to be used for init containers when running the operator locally
 	$(MAKE) docker-build docker-push OPERATOR_IMAGE=$(OPERATOR_IMAGE_LATEST)
+endif

 delete-gke: require-gcloud-project
 	GKE_CLUSTER_VERSION=$(GKE_CLUSTER_VERSION) ./hack/gke-cluster.sh delete
@@ -269,7 +281,7 @@ docker-build:
 	-t $(OPERATOR_IMAGE)

 docker-push:
-ifeq ($(RELEASE), true)
+ifeq ($(USE_ELASTIC_DOCKER_REGISTRY), true)
	@ docker login -u $(ELASTIC_DOCKER_LOGIN) -p $(ELASTIC_DOCKER_PASSWORD) push.docker.elastic.co
 endif
 ifeq ($(KUBECTL_CLUSTER), minikube)
@@ -341,14 +353,19 @@ ci: dep-vendor-only check-fmt generate check-local-changes unit integration e2e-

 # Let's use n1-standard-8 machine to have enough room for multiple pods on a single node.
ci-e2e: ci-bootstrap-gke e2e +# Run e2e tests in gke using custom operator image +ci-e2e-rc: export SKIP_DOCKER_COMMAND=true +ci-e2e-rc: ci-bootstrap-gke e2e + ci-bootstrap-gke: PSP=1 GKE_MACHINE_TYPE=n1-standard-8 $(MAKE) bootstrap-gke ci-release: export GO_TAGS = release ci-release: export LICENSE_PUBKEY = $(ROOT_DIR)/build/ci/license.key -ci-release: export LATEST_RELEASED_IMG = docker.elastic.co/eck/eck-operator:$(TAG) +ci-release: export LATEST_RELEASED_IMG = docker.elastic.co/$(REPOSITORY)/eck-operator:$(TAG) ci-release: @ $(MAKE) dep-vendor-only generate docker-build docker-push + @ echo $(OPERATOR_IMAGE) was pushed! ########################## diff --git a/operators/cmd/manager/main.go b/operators/cmd/manager/main.go index c9f8995711..5b108ade60 100644 --- a/operators/cmd/manager/main.go +++ b/operators/cmd/manager/main.go @@ -135,11 +135,10 @@ func init() { ) Cmd.Flags().String( DebugHTTPServerListenAddressFlag, - ":6060", + "localhost:6060", "Listen address for debug HTTP server (only available in development mode)", ) - viper.BindPFlags(Cmd.Flags()) // enable using dashed notation in flags and underscores in env viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) @@ -212,11 +211,14 @@ func execute() { // restrict the operator to watch resources within a single namespace, unless empty Namespace: viper.GetString(NamespaceFlagName), } + + // only expose prometheus metrics if provided a specific port metricsPort := viper.GetInt(MetricsPortFlag) if metricsPort != 0 { + log.Info("Exposing Prometheus metrics on /metrics", "port", metricsPort) opts.MetricsBindAddress = fmt.Sprintf(":%d", metricsPort) - log.Info(fmt.Sprintf("Exposing Prometheus metrics on /metrics%s", opts.MetricsBindAddress)) } + mgr, err := manager.New(cfg, opts) if err != nil { log.Error(err, "unable to set up overall controller manager") diff --git a/operators/config/crds/apm_v1alpha1_apmserver.yaml b/operators/config/crds/apm_v1alpha1_apmserver.yaml index 1f57e3dadd..deee499398 100644 --- a/operators/config/crds/apm_v1alpha1_apmserver.yaml +++ b/operators/config/crds/apm_v1alpha1_apmserver.yaml @@ -47,6 +47,9 @@ spec: type: object spec: properties: + config: + description: Config represents the APM configuration. + type: object featureFlags: description: FeatureFlags are apm-specific flags that enable or disable specific experimental features @@ -130,6 +133,9 @@ spec: username: description: User is the username to use. type: string + required: + - username + - password type: object secret: description: SecretKeyRef is a secret that contains the @@ -173,12 +179,26 @@ spec: variables, affinity, resources, etc. for the pods created from this NodeSpec. type: object + secureSettings: + description: SecureSettings reference a secret containing secure settings, + to be injected into the APM keystore on each node. Each individual + key/value entry in the referenced secret is considered as an individual + secure setting to be injected. The secret must exist in the same namespace + as the APM resource. 
+ properties: + secretName: + type: string + type: object version: description: Version represents the version of the APM Server type: string type: object status: properties: + controllerVersion: + description: ControllerVersion is the version of the controller that + last updated the ApmServer instance + type: string health: type: string secretTokenSecret: diff --git a/operators/config/crds/elasticsearch_v1alpha1_elasticsearch.yaml b/operators/config/crds/elasticsearch_v1alpha1_elasticsearch.yaml index 70c9306d9a..5e6d467f9b 100644 --- a/operators/config/crds/elasticsearch_v1alpha1_elasticsearch.yaml +++ b/operators/config/crds/elasticsearch_v1alpha1_elasticsearch.yaml @@ -246,6 +246,10 @@ spec: properties: clusterUUID: type: string + controllerVersion: + description: ControllerVersion is the version of the controller that + last updated the Elasticsearch cluster + type: string health: type: string masterNode: diff --git a/operators/config/crds/kibana_v1alpha1_kibana.yaml b/operators/config/crds/kibana_v1alpha1_kibana.yaml index 31b68ed907..77750a3464 100644 --- a/operators/config/crds/kibana_v1alpha1_kibana.yaml +++ b/operators/config/crds/kibana_v1alpha1_kibana.yaml @@ -72,6 +72,8 @@ spec: - password type: object secret: + description: SecretKeyRef is a secret that contains the credentials + to use. type: object type: object certificateAuthorities: @@ -187,6 +189,10 @@ spec: properties: associationStatus: type: string + controllerVersion: + description: ControllerVersion is the version of the controller that + last updated the Kibana instance + type: string health: type: string type: object diff --git a/operators/dev-setup.md b/operators/dev-setup.md index b227794025..9859cff7bb 100644 --- a/operators/dev-setup.md +++ b/operators/dev-setup.md @@ -30,7 +30,9 @@ Run `make check-requisites` to check that all dependencies are installed. ## Development -1. Get a working development Kubernetes cluster. You can either use: +1. Run `make dep-vendor-only` to download extra Go libraries needed to compile the project and store them in the vendor directory. + +2. Get a working development Kubernetes cluster. You can either use: [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/#install-minikube) @@ -41,15 +43,16 @@ Run `make check-requisites` to check that all dependencies are installed. or [GKE](https://cloud.google.com/kubernetes-engine/) + Make sure that container registry authentication is correctly configured as described [here](https://cloud.google.com/container-registry/docs/advanced-authentication). + ```bash export GCLOUD_PROJECT=my-project-id make bootstrap-gke # Sets up GKE cluster with required resources ``` -2. Deploy the operator. +3. Deploy the operator. - * `make dep-vendor-only` to download extra Go libraries needed to compile the project and stores them in the vendor directory. * `make run` to run the operator locally, or `make deploy` to deploy the operators into the configured k8s cluster. * `make samples` to apply a sample stack resource. diff --git a/operators/hack/gke-cluster.sh b/operators/hack/gke-cluster.sh index 63c4aaf7ce..969ceda131 100755 --- a/operators/hack/gke-cluster.sh +++ b/operators/hack/gke-cluster.sh @@ -8,7 +8,7 @@ # of the necessary default settings so that no environment variable has to # be specified. 
# -# Usage: gke-cluster.sh (create|delete|name|registry|credentials) +# Usage: gke-cluster.sh (create|delete|name|registry|credentials|auth) # set -eu @@ -58,7 +58,7 @@ create_cluster() { exit 0 fi - local PSP_OPTION + local PSP_OPTION="" if [ "$PSP" == "1" ]; then PSP_OPTION="--enable-pod-security-policy" fi @@ -120,8 +120,11 @@ main() { auth_service_account export_credentials ;; + auth) + auth_service_account + ;; *) - echo "Usage: gke-cluster.sh (create|delete|name|registry|credentials)"; exit 1 + echo "Usage: gke-cluster.sh (create|delete|name|registry|credentials|auth)"; exit 1 ;; esac } diff --git a/operators/pkg/apis/apm/v1alpha1/apmserver_types.go b/operators/pkg/apis/apm/v1alpha1/apmserver_types.go index 1cf041f91a..150ab0e4a0 100644 --- a/operators/pkg/apis/apm/v1alpha1/apmserver_types.go +++ b/operators/pkg/apis/apm/v1alpha1/apmserver_types.go @@ -7,7 +7,6 @@ package v1alpha1 import ( commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -24,6 +23,9 @@ type ApmServerSpec struct { // NodeCount defines how many nodes the Apm Server deployment must have. NodeCount int32 `json:"nodeCount,omitempty"` + // Config represents the APM configuration. + Config *commonv1alpha1.Config `json:"config,omitempty"` + // HTTP contains settings for HTTP. HTTP commonv1alpha1.HTTPConfig `json:"http,omitempty"` @@ -36,6 +38,13 @@ type ApmServerSpec struct { // +optional PodTemplate corev1.PodTemplateSpec `json:"podTemplate,omitempty"` + // SecureSettings reference a secret containing secure settings, to be injected + // into the APM keystore on each node. + // Each individual key/value entry in the referenced secret is considered as an + // individual secure setting to be injected. + // The secret must exist in the same namespace as the APM resource. + SecureSettings *commonv1alpha1.SecretRef `json:"secureSettings,omitempty"` + // FeatureFlags are apm-specific flags that enable or disable specific experimental features FeatureFlags commonv1alpha1.FeatureFlags `json:"featureFlags,omitempty"` } @@ -56,7 +65,7 @@ type ElasticsearchOutput struct { Hosts []string `json:"hosts,omitempty"` // Auth configures authentication for APM Server to use. - Auth ElasticsearchAuth `json:"auth,omitempty"` + Auth commonv1alpha1.ElasticsearchAuth `json:"auth,omitempty"` // SSL configures TLS-related configuration for Elasticsearch SSL ElasticsearchOutputSSL `json:"ssl,omitempty"` @@ -89,6 +98,8 @@ type ApmServerStatus struct { SecretTokenSecretName string `json:"secretTokenSecret,omitempty"` // Association is the status of any auto-linking to Elasticsearch clusters. Association commonv1alpha1.AssociationStatus + // ControllerVersion is the version of the controller that last updated the ApmServer instance + ControllerVersion string `json:"controllerVersion,omitempty"` } // IsDegraded returns true if the current status is worse than the previous. @@ -101,23 +112,6 @@ func (e ElasticsearchOutput) IsConfigured() bool { return len(e.Hosts) > 0 } -// ElasticsearchAuth contains auth config for APM Server to use with an Elasticsearch cluster -// TODO: this is a good candidate for sharing/reuse between this and Kibana due to association reuse potential. -type ElasticsearchAuth struct { - // Inline is auth provided as plaintext inline credentials. - Inline *ElasticsearchInlineAuth `json:"inline,omitempty"` - // SecretKeyRef is a secret that contains the credentials to use. 
- SecretKeyRef *v1.SecretKeySelector `json:"secret,omitempty"` -} - -// ElasticsearchInlineAuth is a basic username/password combination. -type ElasticsearchInlineAuth struct { - // User is the username to use. - Username string `json:"username,omitempty"` - // Password is the password to use. - Password string `json:"password,omitempty"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -149,3 +143,19 @@ type ApmServerList struct { func init() { SchemeBuilder.Register(&ApmServer{}, &ApmServerList{}) } + +// IsMarkedForDeletion returns true if the APM is going to be deleted +func (as *ApmServer) IsMarkedForDeletion() bool { + if as.DeletionTimestamp.IsZero() { // already handles nil pointer + return false + } + return true +} + +func (as *ApmServer) ElasticsearchAuth() commonv1alpha1.ElasticsearchAuth { + return as.Spec.Output.Elasticsearch.Auth +} + +func (as *ApmServer) SecureSettings() *commonv1alpha1.SecretRef { + return as.Spec.SecureSettings +} diff --git a/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go b/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go index 7d818712b6..d9a2ed652b 100644 --- a/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go +++ b/operators/pkg/apis/apm/v1alpha1/zz_generated.deepcopy.go @@ -10,7 +10,6 @@ package v1alpha1 import ( commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -78,9 +77,18 @@ func (in *ApmServerList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApmServerSpec) DeepCopyInto(out *ApmServerSpec) { *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = (*in).DeepCopy() + } in.HTTP.DeepCopyInto(&out.HTTP) in.Output.DeepCopyInto(&out.Output) in.PodTemplate.DeepCopyInto(&out.PodTemplate) + if in.SecureSettings != nil { + in, out := &in.SecureSettings, &out.SecureSettings + *out = new(commonv1alpha1.SecretRef) + **out = **in + } if in.FeatureFlags != nil { in, out := &in.FeatureFlags, &out.FeatureFlags *out = make(commonv1alpha1.FeatureFlags, len(*in)) @@ -118,48 +126,6 @@ func (in *ApmServerStatus) DeepCopy() *ApmServerStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ElasticsearchAuth) DeepCopyInto(out *ElasticsearchAuth) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(ElasticsearchInlineAuth) - **out = **in - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchAuth. -func (in *ElasticsearchAuth) DeepCopy() *ElasticsearchAuth { - if in == nil { - return nil - } - out := new(ElasticsearchAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ElasticsearchInlineAuth) DeepCopyInto(out *ElasticsearchInlineAuth) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchInlineAuth. 
-func (in *ElasticsearchInlineAuth) DeepCopy() *ElasticsearchInlineAuth {
-	if in == nil {
-		return nil
-	}
-	out := new(ElasticsearchInlineAuth)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ElasticsearchOutput) DeepCopyInto(out *ElasticsearchOutput) {
 	*out = *in
diff --git a/operators/pkg/apis/common/v1alpha1/association.go b/operators/pkg/apis/common/v1alpha1/association.go
index 4bb8088352..37858fcca1 100644
--- a/operators/pkg/apis/common/v1alpha1/association.go
+++ b/operators/pkg/apis/common/v1alpha1/association.go
@@ -4,6 +4,11 @@

 package v1alpha1

+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
 // AssociationStatus is the status of an association resource.
 type AssociationStatus string

@@ -12,3 +17,14 @@ const (
 	AssociationEstablished AssociationStatus = "Established"
 	AssociationFailed      AssociationStatus = "Failed"
 )
+
+// Associated is an interface representing an Elastic stack application that is associated with an Elasticsearch cluster.
+// An associated object needs some credentials to establish a connection to the Elasticsearch cluster, and usually it
+// offers a keystore, which in ECK is represented with an underlying Secret.
+// Kibana and the APM server are two examples of associated objects.
+type Associated interface {
+	metav1.Object
+	runtime.Object
+	ElasticsearchAuth() ElasticsearchAuth
+	SecureSettings() *SecretRef
+}
diff --git a/operators/pkg/apis/common/v1alpha1/authentication.go b/operators/pkg/apis/common/v1alpha1/authentication.go
new file mode 100644
index 0000000000..878b71ed3a
--- /dev/null
+++ b/operators/pkg/apis/common/v1alpha1/authentication.go
@@ -0,0 +1,28 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package v1alpha1
+
+import v1 "k8s.io/api/core/v1"
+
+// ElasticsearchAuth contains auth config for an associated resource (such as Kibana) to use with an Elasticsearch cluster
+type ElasticsearchAuth struct {
+	// Inline is auth provided as plaintext inline credentials.
+	Inline *ElasticsearchInlineAuth `json:"inline,omitempty"`
+	// SecretKeyRef is a secret that contains the credentials to use.
+	SecretKeyRef *v1.SecretKeySelector `json:"secret,omitempty"`
+}
+
+// IsConfigured returns true if one of the possible auth mechanisms is configured.
+func (ea ElasticsearchAuth) IsConfigured() bool {
+	return ea.Inline != nil || ea.SecretKeyRef != nil
+}
+
+// ElasticsearchInlineAuth is a basic username/password combination.
+type ElasticsearchInlineAuth struct {
+	// Username is the username to use.
+	Username string `json:"username"`
+	// Password is the password to use.
+	Password string `json:"password"`
+}
diff --git a/operators/pkg/apis/common/v1alpha1/zz_generated.deepcopy.go b/operators/pkg/apis/common/v1alpha1/zz_generated.deepcopy.go
index d5d044d36e..08e0e99834 100644
--- a/operators/pkg/apis/common/v1alpha1/zz_generated.deepcopy.go
+++ b/operators/pkg/apis/common/v1alpha1/zz_generated.deepcopy.go
@@ -8,6 +8,10 @@

 package v1alpha1

+import (
+	v1 "k8s.io/api/core/v1"
+)
+
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config { if in == nil { @@ -18,6 +22,48 @@ func (in *Config) DeepCopy() *Config { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchAuth) DeepCopyInto(out *ElasticsearchAuth) { + *out = *in + if in.Inline != nil { + in, out := &in.Inline, &out.Inline + *out = new(ElasticsearchInlineAuth) + **out = **in + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchAuth. +func (in *ElasticsearchAuth) DeepCopy() *ElasticsearchAuth { + if in == nil { + return nil + } + out := new(ElasticsearchAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchInlineAuth) DeepCopyInto(out *ElasticsearchInlineAuth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchInlineAuth. +func (in *ElasticsearchInlineAuth) DeepCopy() *ElasticsearchInlineAuth { + if in == nil { + return nil + } + out := new(ElasticsearchInlineAuth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureFlagState) DeepCopyInto(out *FeatureFlagState) { *out = *in diff --git a/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go b/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go index 913f53510e..5ba8616fe7 100644 --- a/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go +++ b/operators/pkg/apis/elasticsearch/v1alpha1/elasticsearch_types.go @@ -219,6 +219,8 @@ type ElasticsearchStatus struct { MasterNode string `json:"masterNode,omitempty"` ExternalService string `json:"service,omitempty"` ZenDiscovery ZenDiscoveryStatus `json:"zenDiscovery,omitempty"` + // ControllerVersion is the version of the controller that last updated the Elasticsearch cluster + ControllerVersion string `json:"controllerVersion,omitempty"` } type ZenDiscoveryStatus struct { diff --git a/operators/pkg/apis/kibana/v1alpha1/kibana_types.go b/operators/pkg/apis/kibana/v1alpha1/kibana_types.go index 4b48091c53..7f3c4de325 100644 --- a/operators/pkg/apis/kibana/v1alpha1/kibana_types.go +++ b/operators/pkg/apis/kibana/v1alpha1/kibana_types.go @@ -6,7 +6,6 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" @@ -62,7 +61,7 @@ type BackendElasticsearch struct { URL string `json:"url"` // Auth configures authentication for Kibana to use. - Auth ElasticsearchAuth `json:"auth,omitempty"` + Auth commonv1alpha1.ElasticsearchAuth `json:"auth,omitempty"` // CertificateAuthorities names a secret that contains a CA file entry to use. 
CertificateAuthorities commonv1alpha1.SecretRef `json:"certificateAuthorities,omitempty"` @@ -73,26 +72,6 @@ func (b BackendElasticsearch) IsConfigured() bool { return b.URL != "" && b.Auth.IsConfigured() && b.CertificateAuthorities.SecretName != "" } -// ElasticsearchAuth contains auth config for Kibana to use with an Elasticsearch cluster -type ElasticsearchAuth struct { - // Inline is auth provided as plaintext inline credentials. - Inline *ElasticsearchInlineAuth `json:"inline,omitempty"` - SecretKeyRef *v1.SecretKeySelector `json:"secret,omitempty"` -} - -// IsConfigured returns true if one of the possible auth mechanisms is configured. -func (ea ElasticsearchAuth) IsConfigured() bool { - return ea.Inline != nil || ea.SecretKeyRef != nil -} - -// ElasticsearchInlineAuth is a basic username/password combination. -type ElasticsearchInlineAuth struct { - // User is the username to use. - Username string `json:"username"` - // Password is the password to use. - Password string `json:"password"` -} - // KibanaHealth expresses the status of the Kibana instances. type KibanaHealth string @@ -108,6 +87,8 @@ type KibanaStatus struct { commonv1alpha1.ReconcilerStatus Health KibanaHealth `json:"health,omitempty"` AssociationStatus commonv1alpha1.AssociationStatus `json:"associationStatus,omitempty"` + // ControllerVersion is the version of the controller that last updated the Kibana instance + ControllerVersion string `json:"controllerVersion,omitempty"` } // IsDegraded returns true if the current status is worse than the previous. @@ -116,13 +97,21 @@ func (ks KibanaStatus) IsDegraded(prev KibanaStatus) bool { } // IsMarkedForDeletion returns true if the Kibana is going to be deleted -func (e Kibana) IsMarkedForDeletion() bool { - if e.DeletionTimestamp.IsZero() { // already handles nil pointer +func (k Kibana) IsMarkedForDeletion() bool { + if k.DeletionTimestamp.IsZero() { // already handles nil pointer return false } return true } +func (k *Kibana) ElasticsearchAuth() commonv1alpha1.ElasticsearchAuth { + return k.Spec.Elasticsearch.Auth +} + +func (k *Kibana) SecureSettings() *commonv1alpha1.SecretRef { + return k.Spec.SecureSettings +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/operators/pkg/apis/kibana/v1alpha1/kibana_types_test.go b/operators/pkg/apis/kibana/v1alpha1/kibana_types_test.go index fedf466662..e490cff985 100644 --- a/operators/pkg/apis/kibana/v1alpha1/kibana_types_test.go +++ b/operators/pkg/apis/kibana/v1alpha1/kibana_types_test.go @@ -14,7 +14,7 @@ func TestBackendElasticsearch_IsConfigured(t *testing.T) { caSecretName := "ca-dummy" type fields struct { URL string - Auth ElasticsearchAuth + Auth v1alpha1.ElasticsearchAuth CertificateAuthorities v1alpha1.SecretRef } tests := []struct { @@ -25,7 +25,7 @@ func TestBackendElasticsearch_IsConfigured(t *testing.T) { { name: "empty backend is not configured", fields: fields{ - Auth: ElasticsearchAuth{}, + Auth: v1alpha1.ElasticsearchAuth{}, }, want: false, }, @@ -33,8 +33,8 @@ func TestBackendElasticsearch_IsConfigured(t *testing.T) { name: "some fields missing is not configured", fields: fields{ URL: "i am an url", - Auth: ElasticsearchAuth{ - Inline: &ElasticsearchInlineAuth{ + Auth: v1alpha1.ElasticsearchAuth{ + Inline: &v1alpha1.ElasticsearchInlineAuth{ Username: "foo", Password: "bar", }, @@ -46,8 +46,8 @@ func TestBackendElasticsearch_IsConfigured(t *testing.T) { name: "all fields configured", fields: fields{ URL: "i am an url", - Auth: ElasticsearchAuth{ - Inline: 
&ElasticsearchInlineAuth{ + Auth: v1alpha1.ElasticsearchAuth{ + Inline: &v1alpha1.ElasticsearchInlineAuth{ Username: "foo", Password: "bar", }, diff --git a/operators/pkg/apis/kibana/v1alpha1/zz_generated.deepcopy.go b/operators/pkg/apis/kibana/v1alpha1/zz_generated.deepcopy.go index 795d5c8a4a..609a388217 100644 --- a/operators/pkg/apis/kibana/v1alpha1/zz_generated.deepcopy.go +++ b/operators/pkg/apis/kibana/v1alpha1/zz_generated.deepcopy.go @@ -10,7 +10,6 @@ package v1alpha1 import ( commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -32,48 +31,6 @@ func (in *BackendElasticsearch) DeepCopy() *BackendElasticsearch { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ElasticsearchAuth) DeepCopyInto(out *ElasticsearchAuth) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(ElasticsearchInlineAuth) - **out = **in - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchAuth. -func (in *ElasticsearchAuth) DeepCopy() *ElasticsearchAuth { - if in == nil { - return nil - } - out := new(ElasticsearchAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ElasticsearchInlineAuth) DeepCopyInto(out *ElasticsearchInlineAuth) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchInlineAuth. -func (in *ElasticsearchInlineAuth) DeepCopy() *ElasticsearchInlineAuth { - if in == nil { - return nil - } - out := new(ElasticsearchInlineAuth) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Kibana) DeepCopyInto(out *Kibana) { *out = *in diff --git a/operators/pkg/controller/apmserver/apmserver_controller.go b/operators/pkg/controller/apmserver/apmserver_controller.go index afe3916cb9..c80565a7be 100644 --- a/operators/pkg/controller/apmserver/apmserver_controller.go +++ b/operators/pkg/controller/apmserver/apmserver_controller.go @@ -7,10 +7,26 @@ package apmserver import ( "crypto/sha256" "fmt" + "path/filepath" "reflect" "sync/atomic" "time" + apmv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/config" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels" + apmname "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/name" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/defaults" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/events" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/finalizer" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -25,53 +41,58 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/yaml" - - apmv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/config" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/defaults" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/events" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/reconciler" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume" - "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" ) const ( name = "apmserver-controller" esCAChecksumLabelName = "apm.k8s.elastic.co/es-ca-file-checksum" configChecksumLabelName = "apm.k8s.elastic.co/config-file-checksum" + + // ApmBaseDir is the base directory of the APM server + ApmBaseDir = "/usr/share/apm-server" ) -var log = logf.Log.WithName(name) +var ( + log = logf.Log.WithName(name) + + // ApmServerBin is the apm server binary file + ApmServerBin = filepath.Join(ApmBaseDir, "apm-server") + + initContainerParameters = keystore.InitContainerParameters{ + KeystoreCreateCommand: ApmServerBin + " keystore create --force", + KeystoreAddCommand: ApmServerBin + " keystore add", + SecureSettingsVolumeMountPath: keystore.SecureSettingsVolumeMountPath, + DataVolumePath: DataVolumePath, + } +) // Add creates a new ApmServer Controller and adds it to the Manager with default RBAC. 
The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, params operator.Parameters) error { - return add(mgr, newReconciler(mgr)) + reconciler := newReconciler(mgr, params) + c, err := add(mgr, reconciler) + if err != nil { + return err + } + return addWatches(c, reconciler) } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { +func newReconciler(mgr manager.Manager, params operator.Parameters) *ReconcileApmServer { + client := k8s.WrapClient(mgr.GetClient()) return &ReconcileApmServer{ - Client: k8s.WrapClient(mgr.GetClient()), - scheme: mgr.GetScheme(), - recorder: mgr.GetRecorder(name), + Client: client, + scheme: mgr.GetScheme(), + recorder: mgr.GetRecorder(name), + dynamicWatches: watches.NewDynamicWatches(), + finalizers: finalizer.NewHandler(client), + Parameters: params, } } -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(name, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - +func addWatches(c controller.Controller, r *ReconcileApmServer) error { // Watch for changes to ApmServer - err = c.Watch(&source.Kind{Type: &apmv1alpha1.ApmServer{}}, &handler.EnqueueRequestForObject{}) + err := c.Watch(&source.Kind{Type: &apmv1alpha1.ApmServer{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } @@ -100,17 +121,30 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } + // dynamically watch referenced secrets to connect to Elasticsearch + if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, r.dynamicWatches.Secrets); err != nil { + return err + } + return nil } +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) (controller.Controller, error) { + // Create a new controller + return controller.New(name, mgr, controller.Options{Reconciler: r}) +} + var _ reconcile.Reconciler = &ReconcileApmServer{} // ReconcileApmServer reconciles an ApmServer object type ReconcileApmServer struct { k8s.Client - scheme *runtime.Scheme - recorder record.EventRecorder - + scheme *runtime.Scheme + recorder record.EventRecorder + dynamicWatches watches.DynamicWatches + finalizers finalizer.Handler + operator.Parameters // iteration is the number of times this controller has run its Reconcile method iteration int64 } @@ -121,20 +155,15 @@ func (r *ReconcileApmServer) Reconcile(request reconcile.Request) (reconcile.Res // atomically update the iteration to support concurrent runs. 
currentIteration := atomic.AddInt64(&r.iteration, 1) iterationStartTime := time.Now() - log.Info("Start reconcile iteration", "iteration", currentIteration) + log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "as_name", request.Name) defer func() { - log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime)) + log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "as_name", request.Name) }() // Fetch the ApmServer resource as := &apmv1alpha1.ApmServer{} err := r.Get(request.NamespacedName, as) - if common.IsPaused(as.ObjectMeta) { - log.Info("Paused : skipping reconciliation", "iteration", currentIteration) - return common.PauseRequeue, nil - } - if err != nil { if errors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. @@ -145,10 +174,33 @@ func (r *ReconcileApmServer) Reconcile(request reconcile.Request) (reconcile.Res return reconcile.Result{}, err } + if common.IsPaused(as.ObjectMeta) { + log.Info("Object is paused. Skipping reconciliation", "namespace", as.Namespace, "as_name", as.Name, "iteration", currentIteration) + return common.PauseRequeue, nil + } + + if err := r.finalizers.Handle(as, r.finalizersFor(*as)...); err != nil { + if errors.IsConflict(err) { + log.V(1).Info("Conflict while handling secret watch finalizer") + return reconcile.Result{Requeue: true}, nil + } + return reconcile.Result{}, err + } + + if as.IsMarkedForDeletion() { + // APM server will be deleted nothing to do other than run finalizers + return reconcile.Result{}, nil + } + state := NewState(request, as) + state.UpdateApmServerControllerVersion(r.OperatorInfo.BuildInfo.Version) state, err = r.reconcileApmServerDeployment(state, as) if err != nil { + if errors.IsConflict(err) { + log.V(1).Info("Conflict while updating status") + return reconcile.Result{Requeue: true}, nil + } return state.Result, err } @@ -169,18 +221,18 @@ func (r *ReconcileApmServer) reconcileApmServerDeployment( as *apmv1alpha1.ApmServer, ) (State, error) { if !as.Spec.Output.Elasticsearch.IsConfigured() { - log.Info("Aborting ApmServer deployment reconciliation as no Elasticsearch output is configured") + log.Info("Aborting ApmServer deployment reconciliation as no Elasticsearch output is configured", + "namespace", as.Namespace, "as_name", as.Name) return state, nil } - // TODO: move server and config secrets into separate methods + // TODO: move server secret into separate method expectedApmServerSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: as.Namespace, - // TODO: suffix+trim properly - Name: as.Name + "-apm-server", - Labels: NewLabels(as.Name), + Name: apmname.SecretToken(as.Name), + Labels: labels.NewLabels(as.Name), }, Data: map[string][]byte{ SecretTokenKey: []byte(rand.String(24)), @@ -222,65 +274,32 @@ func (r *ReconcileApmServer) reconcileApmServerDeployment( reconciledApmServerSecret.Data = expectedApmServerSecret.Data }, PreCreate: func() { - log.Info("Creating apm server secret", "name", expectedApmServerSecret.Name) + log.Info("Creating apm server secret", "namespace", expectedApmServerSecret.Namespace, "secret_name", expectedApmServerSecret.Name, "as_name", as.Name) }, PreUpdate: func() { - log.Info("Updating apm server secret", "name", expectedApmServerSecret.Name) + log.Info("Updating apm server secret", "namespace", expectedApmServerSecret.Namespace, "secret_name", 
expectedApmServerSecret.Name, "as_name", as.Name) }, }, ); err != nil { return state, err } - cfg, err := config.FromResourceSpec(r.Client, *as) + reconciledConfigSecret, err := config.Reconcile(r.Client, r.scheme, as) if err != nil { return state, err } - cfgBytes, err := yaml.Marshal(cfg) + keystoreResources, err := keystore.NewResources( + r.Client, + r.recorder, + r.dynamicWatches, + as, + initContainerParameters, + ) if err != nil { return state, err } - expectedConfigSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: as.Namespace, - // TODO: suffix+trim properly - Name: as.Name + "-config", - Labels: NewLabels(as.Name), - }, - Data: map[string][]byte{ - "apm-server.yml": cfgBytes, - }, - } - reconciledConfigSecret := &corev1.Secret{} - if err := reconciler.ReconcileResource( - reconciler.Params{ - Client: r.Client, - Scheme: r.scheme, - - Owner: as, - Expected: expectedConfigSecret, - Reconciled: reconciledConfigSecret, - - NeedsUpdate: func() bool { - return true - }, - UpdateReconciled: func() { - reconciledConfigSecret.Labels = expectedConfigSecret.Labels - reconciledConfigSecret.Data = expectedConfigSecret.Data - }, - PreCreate: func() { - log.Info("Creating config secret", "name", expectedConfigSecret.Name) - }, - PreUpdate: func() { - log.Info("Updating config secret", "name", expectedConfigSecret.Name) - }, - }, - ); err != nil { - return state, err - } - apmServerPodSpecParams := PodSpecParams{ Version: as.Spec.Version, CustomImageName: as.Spec.Image, @@ -289,13 +308,21 @@ func (r *ReconcileApmServer) reconcileApmServerDeployment( ApmServerSecret: *reconciledApmServerSecret, ConfigSecret: *reconciledConfigSecret, + + keystoreResources: keystoreResources, } - podSpec := NewPodSpec(apmServerPodSpecParams) + podSpec := newPodSpec(as, apmServerPodSpecParams) + + podLabels := labels.NewLabels(as.Name) - podLabels := NewLabels(as.Name) - // add the config file checksum to the pod labels so a change triggers a rolling update - podLabels[configChecksumLabelName] = fmt.Sprintf("%x", sha256.Sum224(cfgBytes)) + // Build a checksum of the configuration, add it to the pod labels so a change triggers a rolling update + configChecksum := sha256.New224() + configChecksum.Write(reconciledConfigSecret.Data[config.ApmCfgSecretKey]) + if keystoreResources != nil { + configChecksum.Write([]byte(keystoreResources.Version)) + } + podLabels[configChecksumLabelName] = fmt.Sprintf("%x", configChecksum.Sum(nil)) esCASecretName := as.Spec.Output.Elasticsearch.SSL.CertificateAuthorities.SecretName if esCASecretName != "" { @@ -305,7 +332,7 @@ func (r *ReconcileApmServer) reconcileApmServerDeployment( esCAVolume := volume.NewSecretVolumeWithMountPath( esCASecretName, "elasticsearch-certs", - "/usr/share/apm-server/config/elasticsearch-certs", + filepath.Join(ApmBaseDir, config.CertificatesDir), ) // build a checksum of the cert file used by ES, which we can use to cause the Deployment to roll the Apm Server @@ -337,12 +364,11 @@ func (r *ReconcileApmServer) reconcileApmServerDeployment( // TODO: also need to hash secret token? - deploymentLabels := NewLabels(as.Name) + deploymentLabels := labels.NewLabels(as.Name) podSpec.Labels = defaults.SetDefaultLabels(podSpec.Labels, podLabels) deploy := NewDeployment(DeploymentParams{ - // TODO: revisit naming? 
diff --git a/operators/pkg/controller/apmserver/config/config.go b/operators/pkg/controller/apmserver/config/config.go
index 6e3366e554..a8612e1727 100644
--- a/operators/pkg/controller/apmserver/config/config.go
+++ b/operators/pkg/controller/apmserver/config/config.go
@@ -5,189 +5,101 @@
 package config

 import (
-	"encoding/json"
 	"fmt"
+	"path/filepath"

 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
+	commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/settings"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s"
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
 )

-// DefaultHTTPPort is the (default) port used by ApmServer
-const DefaultHTTPPort = 8200
+const (
+	// DefaultHTTPPort is the (default) port used by ApmServer
+	DefaultHTTPPort = 8200

-// FromResourceSpec resolves the ApmServer configuration to use based on the provided spec.
-// TODO: missing test
-func FromResourceSpec(c k8s.Client, as v1alpha1.ApmServer) (*Config, error) {
-	// TODO: consider scaling the default values provided based on the apm server resources
-	// these defaults are taken (without scaling) from a defaulted ECE install
+	// Certificates
+	CertificatesDir = "config/elasticsearch-certs"
+)

-	username, password, err := getCredentials(c, as)
-	if err != nil {
-		return nil, err
+// DefaultConfiguration is the default configuration of an APM server.
+// These defaults are taken (without scaling) from a defaulted ECE install
+// TODO: consider scaling the default values provided based on the apm server resources
+var DefaultConfiguration = []byte(`
+apm-server:
+  concurrent_requests: 1
+  max_unzipped_size: 5242880
+  read_timeout: 3600
+  rum:
+    enabled: true
+    rate_limit: 10
+  shutdown_timeout: 30s
+  ssl:
+    enabled: false
+logging:
+  json: true
+  metrics.enabled: true
+output:
+  elasticsearch:
+    compression_level: 5
+    max_bulk_size: 267
+    worker: 5
+queue:
+  mem:
+    events: 2000
+    flush:
+      min_events: 267
+      timeout: 1s
+setup.template.settings.index:
+  auto_expand_replicas: 0-2
+  number_of_replicas: 1
+  number_of_shards: 1
+xpack.monitoring.enabled: true
+`)
+
+func NewConfigFromSpec(c k8s.Client, as v1alpha1.ApmServer) (*settings.CanonicalConfig, error) {
+	specConfig := as.Spec.Config
+	if specConfig == nil {
+		specConfig = &commonv1alpha1.Config{}
 	}

-	return &Config{
-		Name: "${POD_NAME}",
-		ApmServer: ApmServerConfig{
-			Host:               fmt.Sprintf(":%d", DefaultHTTPPort),
-			SecretToken:        "${SECRET_TOKEN}",
-			ReadTimeout:        3600,
-			ShutdownTimeout:    "30s",
-			Rum:                RumConfig{Enabled: true, RateLimit: 10},
-			ConcurrentRequests: 1,
-			MaxUnzippedSize:    5242880,
-			// TODO: TLS support for the server itself
-			SSL: TLSConfig{
-				Enabled: false,
-			},
-		},
-		XPackMonitoringEnabled: true,
-
-		Logging: LoggingConfig{
-			JSON:           true,
-			MetricsEnabled: true,
-		},
-		Queue: QueueConfig{
-			Mem: QueueMemConfig{
-				Events: 2000,
-				Flush: FlushConfig{
-					MinEvents: 267,
-					Timeout:   "1s",
-				},
-			},
-		},
-		SetupTemplateSettingsIndex: SetupTemplateSettingsIndex{
-			NumberOfShards:     1,
-			NumberOfReplicas:   1,
-			AutoExpandReplicas: "0-2",
-		},
-		Output: OutputConfig{
-			Elasticsearch: ElasticsearchOutputConfig{
-				Worker:           5,
-				MaxBulkSize:      267,
-				CompressionLevel: 5,
-				Hosts:            as.Spec.Output.Elasticsearch.Hosts,
-				Username:         username,
-				Password:         password,
-				// TODO: optional TLS
-				SSL: TLSConfig{
-					Enabled: true,
-					// TODO: hardcoded path
-					CertificateAuthorities: []string{"config/elasticsearch-certs/" + certificates.CertFileName},
-				},
-				// TODO: include indices? or will they be defaulted fine?
-			},
-		},
-	}, nil
-}
-
-func getCredentials(c k8s.Client, as v1alpha1.ApmServer) (username, password string, err error) {
-	auth := as.Spec.Output.Elasticsearch.Auth
-
-	if auth.Inline != nil {
-		return auth.Inline.Username, auth.Inline.Password, nil
+	userSettings, err := settings.NewCanonicalConfigFrom(specConfig.Data)
+	if err != nil {
+		return nil, err
 	}

-	// if auth is provided via a secret, resolve credentials from it.
-	if auth.SecretKeyRef != nil {
-		secretObjKey := types.NamespacedName{Namespace: as.Namespace, Name: auth.SecretKeyRef.Name}
-		var secret v1.Secret
-		if err := c.Get(secretObjKey, &secret); err != nil {
-			return "", "", err
-		}
-		return auth.SecretKeyRef.Key, string(secret.Data[auth.SecretKeyRef.Key]), nil
+	// Get username and password
+	username, password, err := association.ElasticsearchAuthSettings(c, &as)
+	if err != nil {
+		return nil, err
 	}

-	// no authentication method provided, return an empty credential
-	return "", "", nil
-}
-
-type Config struct {
-	Name                       string                     `json:"name,omitempty"`
-	ApmServer                  ApmServerConfig            `json:"apm-server,omitempty"`
-	XPackMonitoringEnabled     bool                       `json:"xpack.monitoring.enabled,omitempty"`
-	Logging                    LoggingConfig              `json:"logging,omitempty"`
-	Queue                      QueueConfig                `json:"queue,omitempty"`
-	Output                     OutputConfig               `json:"output,omitempty"`
-	SetupTemplateSettingsIndex SetupTemplateSettingsIndex `json:"setup.template.settings.index,omitempty"`
-}
-
-type OutputConfig struct {
-	Elasticsearch ElasticsearchOutputConfig `json:"elasticsearch,omitempty"`
-	// TODO support other outputs.
-}
-
-type SetupTemplateSettingsIndex struct {
-	NumberOfShards     int    `json:"number_of_shards,omitempty"`
-	NumberOfReplicas   int    `json:"number_of_replicas,omitempty"`
-	AutoExpandReplicas string `json:"auto_expand_replicas,omitempty"`
-}
-
-type ApmServerConfig struct {
-	Host               string    `json:"host,omitempty"`
-	ReadTimeout        int       `json:"read_timeout,omitempty"`
-	ShutdownTimeout    string    `json:"shutdown_timeout,omitempty"`
-	SecretToken        string    `json:"secret_token,omitempty"`
-	SSL                TLSConfig `json:"ssl,omitempty"`
-	Rum                RumConfig `json:"rum,omitempty"`
-	ConcurrentRequests int       `json:"concurrent_requests,omitempty"`
-	MaxUnzippedSize    int       `json:"max_unzipped_size,omitempty"`
-}
-
-type RumConfig struct {
-	Enabled   bool `json:"enabled,omitempty"`
-	RateLimit int  `json:"rate_limit,omitempty"`
-}
-
-type TLSConfig struct {
-	Enabled                bool     `json:"enabled"`
-	Certificate            string   `json:"certificate,omitempty"`
-	Key                    string   `json:"key,omitempty"`
-	CertificateAuthorities []string `json:"certificate_authorities,omitempty"`
-}
+	// Create a base configuration.
+	cfg := settings.MustCanonicalConfig(map[string]interface{}{
+		"apm-server.host":         fmt.Sprintf(":%d", DefaultHTTPPort),
+		"apm-server.secret_token": "${SECRET_TOKEN}",
+	})

-type LoggingConfig struct {
-	Level          string `json:"level,omitempty"`
-	ToFiles        bool   `json:"to_files,omitempty"`
-	JSON           bool   `json:"json,omitempty"`
-	MetricsEnabled bool   `json:"metrics.enabled,omitempty"`
-}
-
-type LoggingFilesConfig struct {
-	Path      string `json:"path,omitempty"`
-	Name      string `json:"name,omitempty"`
-	Keepfiles int    `json:"keepfiles,omitempty"`
-}
-
-type LoggingMetricsConfig struct {
-	Enabled bool `json:"enabled,omitempty"`
-}
-
-type QueueConfig struct {
-	Mem QueueMemConfig `json:"mem,omitempty"`
-}
-
-type QueueMemConfig struct {
-	Events int         `json:"events,omitempty"`
-	Flush  FlushConfig `json:"flush,omitempty"`
-}
-
-type FlushConfig struct {
-	MinEvents int    `json:"min_events,omitempty"`
-	Timeout   string `json:"timeout,omitempty"`
-}
+	// Build the default configuration
+	defaultCfg, err := settings.ParseConfig(DefaultConfiguration)
+	if err != nil {
+		return nil, err
+	}

-type ElasticsearchOutputConfig struct {
-	Hosts            []string          `json:"hosts,omitempty"`
-	SSL              TLSConfig         `json:"ssl,omitempty"`
-	Username         string            `json:"username,omitempty"`
-	Password         string            `json:"password,omitempty"`
-	Headers          map[string]string `json:"headers,omitempty"`
-	Worker           int               `json:"worker,omitempty"`
-	MaxBulkSize      int               `json:"max_bulk_size,omitempty"`
-	CompressionLevel int               `json:"compression_level,omitempty"`
-	Indices          []json.RawMessage `json:"indices,omitempty"`
+	// Merge the configuration with userSettings last so they take precedence.
+	err = cfg.MergeWith(
+		defaultCfg,
+		settings.MustCanonicalConfig(
+			map[string]interface{}{
+				"output.elasticsearch.hosts":    as.Spec.Output.Elasticsearch.Hosts,
+				"output.elasticsearch.username": username,
+				"output.elasticsearch.password": password,
+				"output.elasticsearch.ssl.certificate_authorities": []string{filepath.Join(CertificatesDir, certificates.CertFileName)},
+			},
+		),
+		userSettings,
+	)
+	return cfg, nil
 }
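NewConfigFromSpec above builds the final configuration in layers: a hard-coded base (host, secret token), then DefaultConfiguration, then the operator-managed Elasticsearch output settings, and finally the user-provided spec.config, merged last so it takes precedence. A toy illustration of that "last layer wins" merge using plain nested maps — the real CanonicalConfig is ucfg-backed, so this sketch only mimics the precedence behaviour:

package main

import "fmt"

// merge recursively overlays src onto dst; scalar values from src win,
// mirroring the "last merged source wins" behaviour relied on above.
func merge(dst, src map[string]interface{}) map[string]interface{} {
	for k, v := range src {
		if srcMap, ok := v.(map[string]interface{}); ok {
			if dstMap, ok := dst[k].(map[string]interface{}); ok {
				dst[k] = merge(dstMap, srcMap)
				continue
			}
		}
		dst[k] = v
	}
	return dst
}

func main() {
	defaults := map[string]interface{}{
		"output": map[string]interface{}{
			"elasticsearch": map[string]interface{}{"compression_level": 5, "worker": 5},
		},
	}
	operatorManaged := map[string]interface{}{
		"output": map[string]interface{}{
			"elasticsearch": map[string]interface{}{"hosts": []string{"https://es:9200"}},
		},
	}
	userSettings := map[string]interface{}{
		"output": map[string]interface{}{
			"elasticsearch": map[string]interface{}{"worker": 2}, // user override wins
		},
	}
	cfg := map[string]interface{}{}
	for _, layer := range []map[string]interface{}{defaults, operatorManaged, userSettings} {
		cfg = merge(cfg, layer)
	}
	fmt.Println(cfg)
}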
diff --git a/operators/pkg/controller/apmserver/config/reconcile.go b/operators/pkg/controller/apmserver/config/reconcile.go
new file mode 100644
index 0000000000..5557c0030a
--- /dev/null
+++ b/operators/pkg/controller/apmserver/config/reconcile.go
@@ -0,0 +1,81 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package config
+
+import (
+	"reflect"
+
+	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/name"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/reconciler"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+)
+
+const ApmCfgSecretKey = "apm-server.yml"
+
+var log = logf.Log.WithName("apmserver-config")
+
+// Reconcile reconciles the configuration of the APM server: it first creates the configuration from the APM
+// specification and then reconciles the underlying secret.
+func Reconcile(client k8s.Client, scheme *runtime.Scheme, as *v1alpha1.ApmServer) (*corev1.Secret, error) {
+
+	// Create a new configuration from the APM object spec.
+	cfg, err := NewConfigFromSpec(client, *as)
+	if err != nil {
+		return nil, err
+	}
+
+	cfgBytes, err := cfg.Render()
+	if err != nil {
+		return nil, err
+	}
+
+	// Reconcile the configuration in a secret
+	expectedConfigSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: as.Namespace,
+			Name:      name.Config(as.Name),
+			Labels:    labels.NewLabels(as.Name),
+		},
+		Data: map[string][]byte{
+			ApmCfgSecretKey: cfgBytes,
+		},
+	}
+
+	reconciledConfigSecret := &corev1.Secret{}
+	if err := reconciler.ReconcileResource(
+		reconciler.Params{
+			Client: client,
+			Scheme: scheme,
+
+			Owner:      as,
+			Expected:   expectedConfigSecret,
+			Reconciled: reconciledConfigSecret,
+
+			NeedsUpdate: func() bool {
+				return !reflect.DeepEqual(reconciledConfigSecret.Data, expectedConfigSecret.Data) ||
+					!reflect.DeepEqual(reconciledConfigSecret.Labels, expectedConfigSecret.Labels)
+			},
+			UpdateReconciled: func() {
+				reconciledConfigSecret.Labels = expectedConfigSecret.Labels
+				reconciledConfigSecret.Data = expectedConfigSecret.Data
+			},
+			PreCreate: func() {
+				log.Info("Creating config secret", "name", expectedConfigSecret.Name)
+			},
+			PreUpdate: func() {
+				log.Info("Updating config secret", "name", expectedConfigSecret.Name)
+			},
+		},
+	); err != nil {
+		return nil, err
+	}
+	return reconciledConfigSecret, nil
+}
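Reconcile above delegates the create-or-update mechanics to the generic reconciler.ReconcileResource: fetch the current object into Reconciled, create Expected if it does not exist, otherwise consult NeedsUpdate and apply UpdateReconciled. Note also that NeedsUpdate now compares Data and Labels instead of returning true unconditionally, so unchanged configurations no longer cause spurious writes. A stripped-down sketch of that control flow against an in-memory store — hypothetical types, not the operator's:

package main

import "fmt"

// Params mirrors the shape of the reconciler.Params contract used above,
// reduced to the pieces that drive the control flow.
type Params struct {
	Key              string
	Expected         string
	NeedsUpdate      func(current string) bool
	UpdateReconciled func(current string) string
}

var store = map[string]string{} // stand-in for the API server

func ReconcileResource(p Params) {
	current, found := store[p.Key]
	if !found {
		fmt.Println("creating", p.Key)
		store[p.Key] = p.Expected
		return
	}
	if p.NeedsUpdate(current) {
		fmt.Println("updating", p.Key)
		store[p.Key] = p.UpdateReconciled(current)
		return
	}
	fmt.Println("nothing to do for", p.Key)
}

func main() {
	p := Params{
		Key:              "default/apm-config",
		Expected:         "apm-server.yml: v2",
		NeedsUpdate:      func(current string) bool { return current != "apm-server.yml: v2" },
		UpdateReconciled: func(string) string { return "apm-server.yml: v2" },
	}
	ReconcileResource(p) // creates the resource
	ReconcileResource(p) // no-op: content already matches
}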
diff --git a/operators/pkg/controller/apmserver/deployment.go b/operators/pkg/controller/apmserver/deployment.go
deleted file mode 100644
index 3227359f22..0000000000
--- a/operators/pkg/controller/apmserver/deployment.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package apmserver
-
-import (
-	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
-	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/stringsutil"
-)
-
-func PseudoNamespacedResourceName(as v1alpha1.ApmServer) string {
-	return stringsutil.Concat(as.Name, "-apm-server")
-}
diff --git a/operators/pkg/controller/apmserver/deployment_control.go b/operators/pkg/controller/apmserver/deployment_control.go
index c6d421818d..4bf7975f99 100644
--- a/operators/pkg/controller/apmserver/deployment_control.go
+++ b/operators/pkg/controller/apmserver/deployment_control.go
@@ -63,10 +63,9 @@ func (r *ReconcileApmServer) ReconcileDeployment(expected appsv1.Deployment, own
 				!reflect.DeepEqual(expected.Spec.Template.ObjectMeta, reconciled.Spec.Template.ObjectMeta) ||
 				!reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].Name, reconciled.Spec.Template.Spec.Containers[0].Name) ||
 				!reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].Env, reconciled.Spec.Template.Spec.Containers[0].Env) ||
-				!reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].Image, reconciled.Spec.Template.Spec.Containers[0].Image)
-			// TODO: do something better than reflect.DeepEqual above?
-			// TODO: containers[0] is a bit flaky
-			// TODO: technically not only the Spec may be different, but deployment labels etc.
+				!reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].Image, reconciled.Spec.Template.Spec.Containers[0].Image) ||
+				!reflect.DeepEqual(expected.Spec.Template.Spec.InitContainers, reconciled.Spec.Template.Spec.InitContainers)
+			// TODO: use a hash
 		},
 		UpdateReconciled: func() {
 			// Update the found object and write the result back if there are any changes
diff --git a/operators/pkg/controller/apmserver/labels.go b/operators/pkg/controller/apmserver/labels/labels.go
similarity index 97%
rename from operators/pkg/controller/apmserver/labels.go
rename to operators/pkg/controller/apmserver/labels/labels.go
index 7af9b5622e..296d68e71e 100644
--- a/operators/pkg/controller/apmserver/labels.go
+++ b/operators/pkg/controller/apmserver/labels/labels.go
@@ -2,7 +2,7 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.

-package apmserver
+package labels

 import "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common"
diff --git a/operators/pkg/controller/apmserver/name/name.go b/operators/pkg/controller/apmserver/name/name.go
new file mode 100644
index 0000000000..0a5ed03ef3
--- /dev/null
+++ b/operators/pkg/controller/apmserver/name/name.go
@@ -0,0 +1,43 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package name
+
+import (
+	common_name "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/name"
+)
+
+const (
+	// APM name, used as prefix, is limited to 36 characters,
+	MaxAPMNameLength = 36
+	// this leaves common_name.MaxNameLength - 36 characters for a suffix.
+	MaxSuffixLength = common_name.MaxNameLength - MaxAPMNameLength
+
+	secretTokenSuffix = "token"
+	httpServiceSuffix = "http"
+	configSuffix      = "config"
+	deploymentSuffix  = "server"
+)
+
+// APMNamer is a Namer that is configured with the defaults for resources related to an APM resource.
+var APMNamer = common_name.Namer{
+	MaxSuffixLength: MaxSuffixLength,
+	DefaultSuffixes: []string{"apm"},
+}
+
+func SecretToken(apmName string) string {
+	return APMNamer.Suffix(apmName, secretTokenSuffix)
+}
+
+func HTTPService(apmName string) string {
+	return APMNamer.Suffix(apmName, httpServiceSuffix)
+}
+
+func Deployment(apmName string) string {
+	return APMNamer.Suffix(apmName, deploymentSuffix)
+}
+
+func Config(apmName string) string {
+	return APMNamer.Suffix(apmName, configSuffix)
+}
diff --git a/operators/pkg/controller/apmserver/pod.go b/operators/pkg/controller/apmserver/pod.go
index 682485fef9..1fc57ba92e 100644
--- a/operators/pkg/controller/apmserver/pod.go
+++ b/operators/pkg/controller/apmserver/pod.go
@@ -5,14 +5,17 @@
 package apmserver

 import (
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
+	"path/filepath"
+	"strings"

 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/config"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/defaults"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/stringsutil"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 )

 const (
@@ -22,6 +25,9 @@ const (
 	defaultImageRepositoryAndName string = "docker.elastic.co/apm/apm-server"

 	SecretTokenKey string = "secret-token"
+
+	DataVolumePath   = ApmBaseDir + "/data"
+	ConfigVolumePath = ApmBaseDir + "/config"
 )

 var readinessProbe = corev1.Probe{
@@ -50,7 +56,7 @@ var command = []string{
 	"-c", "config/config-secret/apm-server.yml",
 }

-var configVolume = volume.NewEmptyDirVolume("config-volume", "/usr/share/apm-server/config")
+var configVolume = volume.NewEmptyDirVolume("config-volume", ConfigVolumePath)

 type PodSpecParams struct {
 	Version         string
@@ -60,17 +66,19 @@ type PodSpecParams struct {

 	ApmServerSecret corev1.Secret
 	ConfigSecret    corev1.Secret
+
+	keystoreResources *keystore.Resources
 }

 func imageWithVersion(image string, version string) string {
 	return stringsutil.Concat(image, ":", version)
 }

-func NewPodSpec(p PodSpecParams) corev1.PodTemplateSpec {
+func newPodSpec(as *v1alpha1.ApmServer, p PodSpecParams) corev1.PodTemplateSpec {
 	configSecretVolume := volume.NewSecretVolumeWithMountPath(
 		p.ConfigSecret.Name,
 		"config",
-		"/usr/share/apm-server/config/config-secret",
+		filepath.Join(ConfigVolumePath, "config-secret"),
 	)

 	env := []corev1.EnvVar{
@@ -91,7 +99,7 @@ func newPodSpec(as *v1alpha1.ApmServer, p PodSpecParams) corev1.PodTemplateSpec
 		},
 	}

-	return defaults.NewPodTemplateBuilder(
+	builder := defaults.NewPodTemplateBuilder(
 		p.PodTemplate, v1alpha1.APMServerContainerName).
 		WithDockerImage(p.CustomImageName, imageWithVersion(defaultImageRepositoryAndName, p.Version)).
 		WithReadinessProbe(readinessProbe).
@@ -99,6 +107,18 @@ func newPodSpec(as *v1alpha1.ApmServer, p PodSpecParams) corev1.PodTemplateSpec
 		WithCommand(command).
 		WithVolumes(configVolume.Volume(), configSecretVolume.Volume()).
 		WithVolumeMounts(configVolume.VolumeMount(), configSecretVolume.VolumeMount()).
-		WithEnv(env...).
-		PodTemplate
+		WithEnv(env...)
+
+	if p.keystoreResources != nil {
+		dataVolume := keystore.DataVolume(
+			strings.ToLower(as.Kind),
+			DataVolumePath,
+		)
+		builder.WithInitContainers(p.keystoreResources.InitContainer).
+			WithVolumes(p.keystoreResources.Volume, dataVolume.Volume()).
+			WithVolumeMounts(dataVolume.VolumeMount()).
+			WithInitContainerDefaults()
+	}
+
+	return builder.PodTemplate
 }
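newPodSpec above only wires the keystore pieces when keystoreResources is non-nil: the secure-settings volume, the init container, and a shared data volume. The handoff works because the init container writes the keystore into an emptyDir that the server container also mounts. A bare corev1 sketch of that init-container/emptyDir handoff — volume names, paths, and the command are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// emptyDir shared between the init container and the main container
	dataVolume := corev1.Volume{
		Name:         "apmserver-data",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	dataMount := corev1.VolumeMount{Name: "apmserver-data", MountPath: "/usr/share/apm-server/data"}

	pod := corev1.PodSpec{
		Volumes: []corev1.Volume{dataVolume},
		InitContainers: []corev1.Container{{
			Name:         "init-keystore",
			Command:      []string{"bash", "-c", "echo 'create keystore from mounted secure settings'"},
			VolumeMounts: []corev1.VolumeMount{dataMount}, // writes the keystore here
		}},
		Containers: []corev1.Container{{
			Name:         "apm-server",
			VolumeMounts: []corev1.VolumeMount{dataMount}, // reads the keystore from here
		}},
	}
	fmt.Println(len(pod.InitContainers), "init container sharing volume", pod.Volumes[0].Name)
}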
diff --git a/operators/pkg/controller/apmserver/pod_test.go b/operators/pkg/controller/apmserver/pod_test.go
index 5a5a8af3ce..3a087045a0 100644
--- a/operators/pkg/controller/apmserver/pod_test.go
+++ b/operators/pkg/controller/apmserver/pod_test.go
@@ -8,11 +8,10 @@ import (
 	"reflect"
 	"testing"

-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func TestNewPodSpec(t *testing.T) {
@@ -24,11 +23,21 @@ func TestNewPodSpec(t *testing.T) {
 	varFalse := false
 	tests := []struct {
 		name string
+		as   v1alpha1.ApmServer
 		p    PodSpecParams
 		want corev1.PodTemplateSpec
 	}{
 		{
 			name: "create default pod spec",
+			as: v1alpha1.ApmServer{
+				TypeMeta: metav1.TypeMeta{
+					Kind: "ApmServer",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "fake-apm",
+					Namespace: "default",
+				},
+			},
 			p: PodSpecParams{
 				Version: "7.0.1",
 				ConfigSecret: corev1.Secret{
@@ -84,7 +93,7 @@ func TestNewPodSpec(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := NewPodSpec(tt.p); !reflect.DeepEqual(got, tt.want) {
+			if got := newPodSpec(&tt.as, tt.p); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("NewPodSpec() = %v, want %v", got, tt.want)
 			}
 		})
diff --git a/operators/pkg/controller/apmserver/services.go b/operators/pkg/controller/apmserver/services.go
index 5ffe36a42c..3bb720abf6 100644
--- a/operators/pkg/controller/apmserver/services.go
+++ b/operators/pkg/controller/apmserver/services.go
@@ -6,8 +6,9 @@ package apmserver

 import (
 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels"
+	apmname "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/name"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/defaults"
-
 	corev1 "k8s.io/api/core/v1"
 )

@@ -18,9 +19,9 @@ func NewService(as v1alpha1.ApmServer) *corev1.Service {
 	}

 	svc.ObjectMeta.Namespace = as.Namespace
-	svc.ObjectMeta.Name = PseudoNamespacedResourceName(as)
+	svc.ObjectMeta.Name = apmname.HTTPService(as.Name)

-	labels := NewLabels(as.Name)
+	labels := labels.NewLabels(as.Name)
 	ports := []corev1.ServicePort{
 		{
 			Protocol: corev1.ProtocolTCP,
diff --git a/operators/pkg/controller/apmserver/state.go b/operators/pkg/controller/apmserver/state.go
index f6b82273ea..64ba2f9fb5 100644
--- a/operators/pkg/controller/apmserver/state.go
+++ b/operators/pkg/controller/apmserver/state.go
@@ -43,3 +43,11 @@ func (s State) UpdateApmServerState(deployment v1.Deployment, apmServerSecret co
 func (s State) UpdateApmServerExternalService(svc corev1.Service) {
 	s.ApmServer.Status.ExternalService = svc.Name
 }
+
+func (s *State) UpdateApmServerControllerVersion(version string) {
+	s.ApmServer.Status.ControllerVersion = version
+}
+
+func (s *State) GetApmServerControllerVersion() string {
+	return s.ApmServer.Status.ControllerVersion
+}
diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go
index a8512d2eef..b03e6f6886 100644
--- a/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go
+++ b/operators/pkg/controller/apmserverelasticsearchassociation/apmserverelasticsearchassociation_controller.go
@@ -111,9 +111,9 @@ func (r *ReconcileApmServerElasticsearchAssociation) Reconcile(request reconcile
 	// atomically update the iteration to support concurrent runs.
 	currentIteration := atomic.AddInt64(&r.iteration, 1)
 	iterationStartTime := time.Now()
-	log.Info("Start reconcile iteration", "iteration", currentIteration)
+	log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "as_name", request.Name)
 	defer func() {
-		log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime))
+		log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "as_name", request.Name)
 	}()

 	var apmServer apmtype.ApmServer
@@ -127,7 +127,7 @@ func (r *ReconcileApmServerElasticsearchAssociation) Reconcile(request reconcile
 	}

 	if common.IsPaused(apmServer.ObjectMeta) {
-		log.Info("Paused : skipping reconciliation", "iteration", currentIteration)
+		log.Info("Object is paused. Skipping reconciliation", "namespace", apmServer.Namespace, "as_name", apmServer.Name, "iteration", currentIteration)
 		return common.PauseRequeue, nil
 	}

@@ -247,14 +247,14 @@ func (r *ReconcileApmServerElasticsearchAssociation) reconcileInternal(apmServer
 	// TODO: this is a bit rough
 	if !reflect.DeepEqual(apmServer.Spec.Output.Elasticsearch, expectedEsConfig) {
 		apmServer.Spec.Output.Elasticsearch = expectedEsConfig
-		log.Info("Updating Apm Server spec with Elasticsearch output configuration")
+		log.Info("Updating Apm Server spec with Elasticsearch output configuration", "namespace", apmServer.Namespace, "as_name", apmServer.Name)
 		if err := r.Update(&apmServer); err != nil {
 			return commonv1alpha1.AssociationPending, err
 		}
 	}

 	if err := deleteOrphanedResources(r, apmServer); err != nil {
-		log.Error(err, "Error while trying to delete orphaned resources. Continuing.")
+		log.Error(err, "Error while trying to delete orphaned resources. Continuing.", "namespace", apmServer.Namespace, "as_name", apmServer.Name)
 	}

 	return commonv1alpha1.AssociationEstablished, nil
@@ -274,7 +274,7 @@ func deleteOrphanedResources(c k8s.Client, apm apmtype.ApmServer) error {
 	for _, s := range secrets.Items {
 		controlledBy := metav1.IsControlledBy(&s, &apm)
 		if controlledBy && !apm.Spec.Output.Elasticsearch.ElasticsearchRef.IsDefined() {
-			log.Info("Deleting", "secret", k8s.ExtractNamespacedName(&s))
+			log.Info("Deleting secret", "namespace", s.Namespace, "secret_name", s.Name, "as_name", apm.Name)
 			if err := c.Delete(&s); err != nil {
 				return err
 			}
diff --git a/operators/pkg/controller/apmserverelasticsearchassociation/user.go b/operators/pkg/controller/apmserverelasticsearchassociation/user.go
index e0fd4a3c62..e5c8af5704 100644
--- a/operators/pkg/controller/apmserverelasticsearchassociation/user.go
+++ b/operators/pkg/controller/apmserverelasticsearchassociation/user.go
@@ -9,7 +9,7 @@ import (
 	apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1"
-	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/reconciler"
 	common "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/user"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/elasticsearch/label"
@@ -73,7 +73,7 @@ func reconcileEsUser(c k8s.Client, s *runtime.Scheme, apm apmtype.ApmServer, es
 	// TODO: more flexible user-name (suffixed-trimmed?) so multiple associations do not conflict
 	pw := common.RandomPasswordBytes()
 	// the secret will be on the Apm side of the association so we are applying the Apm labels here
-	secretLabels := apmserver.NewLabels(apm.Name)
+	secretLabels := labels.NewLabels(apm.Name)
 	secretLabels[AssociationLabelName] = apm.Name
 	// add ES labels
 	for k, v := range label.NewLabels(apm.Spec.Output.Elasticsearch.ElasticsearchRef.NamespacedName()) {
diff --git a/operators/pkg/controller/common/annotation/pod.go b/operators/pkg/controller/common/annotation/pod.go
index 5896343fd9..44282b5bc1 100644
--- a/operators/pkg/controller/common/annotation/pod.go
+++ b/operators/pkg/controller/common/annotation/pod.go
@@ -15,6 +15,7 @@ import (
 )

 const (
+	// UpdateAnnotation is the name of the annotation applied to pods to force kubelet to resync secrets
 	UpdateAnnotation = "update.k8s.elastic.co/timestamp"
 )

@@ -52,10 +53,10 @@ func MarkPodAsUpdated(
 	pod corev1.Pod,
 ) {
 	log.V(1).Info(
-		"Update annotation on pod",
+		"Updating annotation on pod",
 		"annotation", UpdateAnnotation,
 		"namespace", pod.Namespace,
-		"pod", pod.Name,
+		"pod_name", pod.Name,
 	)
 	if pod.Annotations == nil {
 		pod.Annotations = map[string]string{}
 	}
@@ -64,12 +65,13 @@ func MarkPodAsUpdated(
 		time.Now().Format(time.RFC3339Nano) // nano should be enough to avoid collisions and keep it readable by a human.
 	if err := c.Update(&pod); err != nil {
 		if errors.IsConflict(err) {
-			log.V(1).Info("Conflict while updating pod annotation")
+			// Conflicts are expected and will be handled on the next reconcile loop, no need to error out here
+			log.V(1).Info("Conflict while updating pod annotation", "namespace", pod.Namespace, "pod_name", pod.Name)
 		} else {
 			log.Error(err, "failed to update pod annotation",
 				"annotation", UpdateAnnotation,
 				"namespace", pod.Namespace,
-				"pod", pod.Name)
+				"pod_name", pod.Name)
 		}
 	}
 }
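MarkPodAsUpdated above relies on the kubelet re-syncing a pod's secret-backed volumes when the pod object changes, so writing a fresh timestamp annotation is a cheap way to force that refresh. A minimal sketch of the annotation stamping — pure data manipulation, no API client:

package main

import (
	"fmt"
	"time"
)

const updateAnnotation = "update.k8s.elastic.co/timestamp"

// markAsUpdated stamps the annotation map with a nanosecond-precision,
// human-readable timestamp; a fresh value on every call guarantees the
// pod object actually changes, which is what triggers the kubelet sync.
func markAsUpdated(annotations map[string]string) map[string]string {
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[updateAnnotation] = time.Now().Format(time.RFC3339Nano)
	return annotations
}

func main() {
	fmt.Println(markAsUpdated(nil))
}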
diff --git a/operators/pkg/controller/common/association/association.go b/operators/pkg/controller/common/association/association.go
new file mode 100644
index 0000000000..ccaee86588
--- /dev/null
+++ b/operators/pkg/controller/common/association/association.go
@@ -0,0 +1,37 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package association
+
+import (
+	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ElasticsearchAuthSettings returns the user and the password to be used by an associated object to authenticate
+// against an Elasticsearch cluster.
+func ElasticsearchAuthSettings(
+	c k8s.Client,
+	associated v1alpha1.Associated,
+) (username, password string, err error) {
+	auth := associated.ElasticsearchAuth()
+	if auth.Inline != nil {
+		return auth.Inline.Username, auth.Inline.Password, nil
+	}
+
+	// if auth is provided via a secret, resolve credentials from it.
+	if auth.SecretKeyRef != nil {
+		secretObjKey := types.NamespacedName{Namespace: associated.GetNamespace(), Name: auth.SecretKeyRef.Name}
+		var secret v1.Secret
+		if err := c.Get(secretObjKey, &secret); err != nil {
+			return "", "", err
+		}
+		return auth.SecretKeyRef.Key, string(secret.Data[auth.SecretKeyRef.Key]), nil
+	}
+
+	// no authentication method provided, return an empty credential
+	return "", "", nil
+}
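ElasticsearchAuthSettings above resolves credentials with a simple precedence: inline auth wins, then a referenced secret (where, notably, the secret key doubles as the username), and otherwise empty credentials are returned. A toy version of that resolution order with plain structs standing in for the CRD types:

package main

import (
	"errors"
	"fmt"
)

type InlineAuth struct{ Username, Password string }

type SecretKeyRef struct{ SecretName, Key string }

type Auth struct {
	Inline       *InlineAuth
	SecretKeyRef *SecretKeyRef
}

// resolve mirrors the precedence above: inline wins, then secret reference
// (key name used as username), then empty credentials.
func resolve(auth Auth, secrets map[string]map[string][]byte) (string, string, error) {
	if auth.Inline != nil {
		return auth.Inline.Username, auth.Inline.Password, nil
	}
	if auth.SecretKeyRef != nil {
		secret, ok := secrets[auth.SecretKeyRef.SecretName]
		if !ok {
			return "", "", errors.New("secret not found: " + auth.SecretKeyRef.SecretName)
		}
		return auth.SecretKeyRef.Key, string(secret[auth.SecretKeyRef.Key]), nil
	}
	return "", "", nil
}

func main() {
	secrets := map[string]map[string][]byte{
		"es-user": {"elastic-internal-apm": []byte("changeme")},
	}
	u, p, _ := resolve(Auth{SecretKeyRef: &SecretKeyRef{SecretName: "es-user", Key: "elastic-internal-apm"}}, secrets)
	fmt.Println(u, p)
}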
diff --git a/operators/pkg/controller/apmserver/config/config_test.go b/operators/pkg/controller/common/association/association_test.go
similarity index 64%
rename from operators/pkg/controller/apmserver/config/config_test.go
rename to operators/pkg/controller/common/association/association_test.go
index f0e61ccddf..82d34803fb 100644
--- a/operators/pkg/controller/apmserver/config/config_test.go
+++ b/operators/pkg/controller/common/association/association_test.go
@@ -2,18 +2,24 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.

-package config
+package association

 import (
 	"testing"

 	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1"
+	commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1"
 	"github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )

+var elasticsearchInlineAuth = commonv1alpha1.ElasticsearchInlineAuth{
+	Username: "foo_username",
+	Password: "foo_password",
+}
+
 func Test_getCredentials(t *testing.T) {
 	type args struct {
 		c  k8s.Client
@@ -45,7 +51,7 @@ func Test_getCredentials(t *testing.T) {
 					Output: v1alpha1.Output{
 						Elasticsearch: v1alpha1.ElasticsearchOutput{
 							Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"},
-							Auth: v1alpha1.ElasticsearchAuth{
+							Auth: commonv1alpha1.ElasticsearchAuth{
 								SecretKeyRef: &corev1.SecretKeySelector{
 									Key: "elastic-internal-apm",
 									LocalObjectReference: corev1.LocalObjectReference{
@@ -61,10 +67,40 @@ func Test_getCredentials(t *testing.T) {
 			wantUsername: "elastic-internal-apm",
 			wantPassword: "a2s1Nmt0N3Nwdmg4cmpqdDlucWhsN3cy",
 		},
+		{
+			name: "Test inline credentials",
+			args: args{
+				c: k8s.WrapClient(fake.NewFakeClient(&corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "apmelasticsearchassociation-sample-elastic-internal-apm",
+						Namespace: "default",
+					},
+					Data: map[string][]byte{"elastic-internal-apm": []byte("a2s1Nmt0N3Nwdmg4cmpqdDlucWhsN3cy")},
+				})),
+				as: v1alpha1.ApmServer{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "apm-server-sample",
+						Namespace: "default",
+					},
+					Spec: v1alpha1.ApmServerSpec{
+						Output: v1alpha1.Output{
+							Elasticsearch: v1alpha1.ElasticsearchOutput{
+								Hosts: []string{"https://elasticsearch-sample-es-http.default.svc:9200"},
+								Auth: commonv1alpha1.ElasticsearchAuth{
+									Inline: &elasticsearchInlineAuth,
+								},
+							},
+						},
+					},
+				},
+			},
+			wantUsername: "foo_username",
+			wantPassword: "foo_password",
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			gotUsername, gotPassword, err := getCredentials(tt.args.c, tt.args.as)
+			gotUsername, gotPassword, err := ElasticsearchAuthSettings(tt.args.c, &tt.args.as)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("getCredentials() error = %v, wantErr %v", err, tt.wantErr)
 				return
diff --git a/operators/pkg/controller/kibana/securesettings/initcontainer.go b/operators/pkg/controller/common/association/keystore/initcontainer.go
similarity index 50%
rename from operators/pkg/controller/kibana/securesettings/initcontainer.go
rename to operators/pkg/controller/common/association/keystore/initcontainer.go
index 9ab87f42da..18d4000850 100644
--- a/operators/pkg/controller/kibana/securesettings/initcontainer.go
+++ b/operators/pkg/controller/common/association/keystore/initcontainer.go
@@ -2,11 +2,13 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.

-package securesettings
+package keystore

 import (
+	"bytes"
+	"text/template"
+
 	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume"
-	kbvolume "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/volume"
 	corev1 "k8s.io/api/core/v1"
 )

@@ -14,32 +16,55 @@ const (
 	InitContainerName = "init-keystore"
 )

-// script is a small bash script to create a Kibana keystore,
+// InitContainerParameters helps to create a valid keystore init script for Kibana or the APM server.
+type InitContainerParameters struct {
+	// Where the user provided secured settings should be mounted
+	SecureSettingsVolumeMountPath string
+	// Where the data will be copied
+	DataVolumePath string
+	// Keystore add command
+	KeystoreAddCommand string
+	// Keystore create command
+	KeystoreCreateCommand string
+}
+
+// script is a small bash script to create a Kibana or APM keystore,
 // then add all entries from the secure settings secret volume into it.
 const script = `#!/usr/bin/env bash

-set -eu
+set -eux

-echo "Initializing Kibana keystore."
+echo "Initializing keystore."

 # create a keystore in the default data path
-./bin/kibana-keystore create
+{{ .KeystoreCreateCommand }}

 # add all existing secret entries into it
-for filename in ` + kbvolume.SecureSettingsVolumeMountPath + `/*; do
+for filename in {{ .SecureSettingsVolumeMountPath }}/*; do
 	[[ -e "$filename" ]] || continue # glob does not match
 	key=$(basename "$filename")
 	echo "Adding "$key" to the keystore."
-	./bin/kibana-keystore add "$key" --stdin < "$filename"
+	{{ .KeystoreAddCommand }} "$key" --stdin < "$filename"
 done

 echo "Keystore initialization successful."
 `

+var scriptTemplate = template.Must(template.New("").Parse(script))
+
 // initContainer returns an init container that executes a bash script
-// to create the Kibana Keystore.
-func initContainer(secureSettingsSecret volume.SecretVolume) corev1.Container {
+// to create the keystore.
+func initContainer(
+	secureSettingsSecret volume.SecretVolume,
+	volumePrefix string,
+	parameters InitContainerParameters,
+) (corev1.Container, error) {
 	privileged := false
+	tplBuffer := bytes.Buffer{}
+
+	if err := scriptTemplate.Execute(&tplBuffer, parameters); err != nil {
+		return corev1.Container{}, err
+	}
+
 	return corev1.Container{
 		// Image will be inherited from pod template defaults Kibana Docker image
 		ImagePullPolicy: corev1.PullIfNotPresent,
@@ -47,12 +72,12 @@ func initContainer(
 		SecurityContext: &corev1.SecurityContext{
 			Privileged: &privileged,
 		},
-		Command: []string{"/usr/bin/env", "bash", "-c", script},
+		Command: []string{"/usr/bin/env", "bash", "-c", tplBuffer.String()},
 		VolumeMounts: []corev1.VolumeMount{
 			// access secure settings
 			secureSettingsSecret.VolumeMount(),
-			// write the keystore in Kibana data volume
-			kbvolume.KibanaDataVolume.VolumeMount(),
+			// write the keystore in the data volume
+			DataVolume(volumePrefix, parameters.DataVolumePath).VolumeMount(),
 		},
-	}
+	}, nil
 }
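Turning the init script into a text/template is what makes it shareable between Kibana and the APM server: only the keystore commands and mount paths differ per product. Rendering works as sketched below; the parameter values are plausible APM-flavoured guesses, not necessarily what the operator passes:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// a trimmed version of the templated init script shown above
const script = `#!/usr/bin/env bash
set -eux
{{ .KeystoreCreateCommand }}
for filename in {{ .SecureSettingsVolumeMountPath }}/*; do
	[[ -e "$filename" ]] || continue
	{{ .KeystoreAddCommand }} "$(basename "$filename")" --stdin < "$filename"
done
`

type InitContainerParameters struct {
	SecureSettingsVolumeMountPath string
	KeystoreAddCommand            string
	KeystoreCreateCommand         string
}

func main() {
	tpl := template.Must(template.New("init").Parse(script))
	params := InitContainerParameters{
		SecureSettingsVolumeMountPath: "/mnt/elastic-internal/secure-settings",
		KeystoreAddCommand:            "apm-server keystore add",    // assumed command
		KeystoreCreateCommand:         "apm-server keystore create", // assumed command
	}
	if err := tpl.Execute(os.Stdout, params); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}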
-package securesettings +package keystore import ( "reflect" @@ -11,7 +11,6 @@ import ( "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" - kbvolume "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/volume" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/types" @@ -27,8 +26,8 @@ func Test_secureSettingsWatchName(t *testing.T) { func Test_secureSettingsVolume(t *testing.T) { expectedSecretVolume := volume.NewSecretVolumeWithMountPath( testSecureSettingsSecret.Name, - kbvolume.SecureSettingsVolumeName, - kbvolume.SecureSettingsVolumeMountPath, + SecureSettingsVolumeName, + SecureSettingsVolumeMountPath, ) createWatches := func(handlerName string) watches.DynamicWatches { w := watches.NewDynamicWatches() @@ -91,7 +90,7 @@ func Test_secureSettingsVolume(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { recorder := record.NewFakeRecorder(1000) - vol, version, err := secureSettingsVolume(tt.c, recorder, tt.w, tt.kb) + vol, version, err := secureSettingsVolume(tt.c, recorder, tt.w, &tt.kb) require.NoError(t, err) if !reflect.DeepEqual(vol, tt.wantVolume) { diff --git a/operators/pkg/controller/common/association/keystore/volumes.go b/operators/pkg/controller/common/association/keystore/volumes.go new file mode 100644 index 0000000000..66d46d7464 --- /dev/null +++ b/operators/pkg/controller/common/association/keystore/volumes.go @@ -0,0 +1,30 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package keystore + +import ( + "fmt" + + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/volume" +) + +const ( + DataVolumeNamePattern = "%s-data" + + SecureSettingsVolumeName = "elastic-internal-secure-settings" + SecureSettingsVolumeMountPath = "/mnt/elastic-internal/secure-settings" +) + +// dataVolumeName returns the volume name in which the keystore will be stored. +func dataVolumeName(prefix string) string { + return fmt.Sprintf(DataVolumeNamePattern, prefix) +} + +// DataVolume returns the volume used to propagate the keystore file from the init container to +// the server running in the main container. +// Since the APM server or Kibana are stateless and the keystore is created on pod start, an EmptyDir is fine here. 
+func DataVolume(prefix string, dataVolumePath string) volume.EmptyDirVolume { + return volume.NewEmptyDirVolume(dataVolumeName(prefix), dataVolumePath) +} diff --git a/operators/pkg/controller/common/certificates/ca_reconcile.go b/operators/pkg/controller/common/certificates/ca_reconcile.go index 588583c64a..12d2df14c3 100644 --- a/operators/pkg/controller/common/certificates/ca_reconcile.go +++ b/operators/pkg/controller/common/certificates/ca_reconcile.go @@ -54,7 +54,6 @@ func ReconcileCAForOwner( caType CAType, rotationParams RotationParams, ) (*CA, error) { - ownerNsn := k8s.ExtractNamespacedName(owner) // retrieve current CA secret caInternalSecret := corev1.Secret{} @@ -67,25 +66,25 @@ func ReconcileCAForOwner( return nil, err } if apierrors.IsNotFound(err) { - log.Info("No internal CA certificate Secret found, creating a new one", "owner", ownerNsn, "ca_type", caType) + log.Info("No internal CA certificate Secret found, creating a new one", "owner_namespace", owner.GetNamespace(), "owner_name", owner.GetName(), "ca_type", caType) return renewCA(cl, namer, owner, labels, rotationParams.Validity, scheme, caType) } // build CA ca := buildCAFromSecret(caInternalSecret) if ca == nil { - log.Info("Cannot build CA from secret, creating a new one", "owner", ownerNsn, "ca_type", caType) + log.Info("Cannot build CA from secret, creating a new one", "owner_namespace", owner.GetNamespace(), "owner_name", owner.GetName(), "ca_type", caType) return renewCA(cl, namer, owner, labels, rotationParams.Validity, scheme, caType) } // renew if cannot reuse if !canReuseCA(ca, rotationParams.RotateBefore) { - log.Info("Cannot reuse existing CA, creating a new one", "owner", ownerNsn, "ca_type", caType) + log.Info("Cannot reuse existing CA, creating a new one", "owner_namespace", owner.GetNamespace(), "owner_name", owner.GetName(), "ca_type", caType) return renewCA(cl, namer, owner, labels, rotationParams.Validity, scheme, caType) } // reuse existing CA - log.V(1).Info("Reusing existing CA", "owner", ownerNsn, "ca_type", caType) + log.V(1).Info("Reusing existing CA", "owner_namespace", owner.GetNamespace(), "owner_name", owner.GetName(), "ca_type", caType) return ca, nil } @@ -138,11 +137,11 @@ func canReuseCA(ca *CA, expirationSafetyMargin time.Duration) bool { func certIsValid(cert x509.Certificate, expirationSafetyMargin time.Duration) bool { now := time.Now() if now.Before(cert.NotBefore) { - log.Info("CA cert is not valid yet, will create a new one") + log.Info("CA cert is not valid yet", "subject", cert.Subject) return false } if now.After(cert.NotAfter.Add(-expirationSafetyMargin)) { - log.Info("CA cert expired or soon to expire, will create a new one", "expiration", cert.NotAfter) + log.Info("CA cert expired or soon to expire", "subject", cert.Subject, "expiration", cert.NotAfter) return false } return true @@ -181,7 +180,7 @@ func buildCAFromSecret(caInternalSecret corev1.Secret) *CA { } certs, err := ParsePEMCerts(caBytes) if err != nil { - log.Info("Cannot parse PEM cert from CA secret, will create a new one", "err", err) + log.Error(err, "Cannot parse PEM cert from CA secret, will create a new one", "namespace", caInternalSecret.Namespace, "secret_name", caInternalSecret.Name) return nil } if len(certs) == 0 { @@ -190,7 +189,8 @@ func buildCAFromSecret(caInternalSecret corev1.Secret) *CA { if len(certs) > 1 { log.Info( "More than 1 certificate in the CA secret, continuing with the first one", - "secret", caInternalSecret.Name, + "namespace", caInternalSecret.Namespace, + "secret_name", 
caInternalSecret.Name, ) } cert := certs[0] @@ -201,7 +201,7 @@ func buildCAFromSecret(caInternalSecret corev1.Secret) *CA { } privateKey, err := ParsePEMPrivateKey(privateKeyBytes) if err != nil { - log.Info("Cannot parse PEM private key from CA secret, will create a new one", "err", err) + log.Error(err, "Cannot parse PEM private key from CA secret, will create a new one", "namespace", caInternalSecret.Namespace, "secret_name", caInternalSecret.Name) return nil } return NewCA(privateKey, cert) diff --git a/operators/pkg/controller/common/certificates/http/reconcile.go b/operators/pkg/controller/common/certificates/http/reconcile.go index b7ec56d63e..cd422ea594 100644 --- a/operators/pkg/controller/common/certificates/http/reconcile.go +++ b/operators/pkg/controller/common/certificates/http/reconcile.go @@ -9,7 +9,6 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" - "fmt" "net" "reflect" "strings" @@ -135,12 +134,12 @@ func reconcileHTTPInternalCertificatesSecret( if needsUpdate { if shouldCreateSecret { - log.Info("Creating HTTP internal certificate secret", "secret", secret.Name) + log.Info("Creating HTTP internal certificate secret", "namespace", secret.Namespace, "secret_name", secret.Name) if err := c.Create(&secret); err != nil { return nil, err } } else { - log.Info("Updating HTTP internal certificate secret", "secret", secret.Name) + log.Info("Updating HTTP internal certificate secret", "namespace", secret.Namespace, "secret_name", secret.Name) if err := c.Update(&secret); err != nil { return nil, err } @@ -172,7 +171,7 @@ func ensureInternalSelfSignedCertificateSecretContents( if privateKeyData, ok := secret.Data[certificates.KeyFileName]; ok { storedPrivateKey, err := certificates.ParsePEMPrivateKey(privateKeyData) if err != nil { - log.Error(err, "Unable to parse stored private key", "secret", secret.Name) + log.Error(err, "Unable to parse stored private key", "namespace", secret.Namespace, "secret_name", secret.Name) } else { needsNewPrivateKey = false privateKey = storedPrivateKey @@ -195,9 +194,10 @@ func ensureInternalSelfSignedCertificateSecretContents( if shouldIssueNewHTTPCertificate(owner, namer, tls, secret, svcs, ca, rotationParam.RotateBefore) { log.Info( "Issuing new HTTP certificate", - "secret", secret.Name, - "namespace", owner.Namespace, - "name", owner.Name, + "namespace", secret.Namespace, + "secret_name", secret.Name, + "owner_namespace", owner.Namespace, + "owner_name", owner.Name, ) csr, err := x509.CreateCertificateRequest(cryptorand.Reader, &x509.CertificateRequest{}, privateKey) @@ -263,7 +263,7 @@ func shouldIssueNewHTTPCertificate( } else { certs, err := certificates.ParsePEMCerts(certData) if err != nil { - log.Error(err, "Invalid certificate data found, issuing new certificate", "secret", secret.Name) + log.Error(err, "Invalid certificate data found, issuing new certificate", "namespace", secret.Namespace, "secret_name", secret.Name) return true } @@ -289,16 +289,20 @@ func shouldIssueNewHTTPCertificate( } if _, err := certificate.Verify(verifyOpts); err != nil { log.Info( - fmt.Sprintf("Certificate was not valid, should issue new: %s", err), + "Certificate was not valid, should issue new", + "validation_failure", err, "subject", certificate.Subject, "issuer", certificate.Issuer, "current_ca_subject", ca.Cert.Subject, + "secret_name", secret.Name, + "namespace", secret.Namespace, + "owner_name", owner.Name, ) return true } if time.Now().After(certificate.NotAfter.Add(-certReconcileBefore)) { - log.Info("Certificate soon to expire, should 
issue new", "secret", secret.Name) + log.Info("Certificate soon to expire, should issue new", "namespace", secret.Namespace, "secret_name", secret.Name) return true } diff --git a/operators/pkg/controller/common/certificates/x509_othername.go b/operators/pkg/controller/common/certificates/x509_othername.go index 0b470f9ec6..c8f0edd643 100644 --- a/operators/pkg/controller/common/certificates/x509_othername.go +++ b/operators/pkg/controller/common/certificates/x509_othername.go @@ -8,7 +8,6 @@ import ( "crypto/x509" "encoding/asn1" "errors" - "fmt" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) @@ -209,7 +208,7 @@ func ParseSANGeneralNamesOtherNamesOnly(c *x509.Certificate) ([]GeneralName, err }, }) default: - log.Info(fmt.Sprintf("Ignoring unsupported GeneralNames tag [%d]", generalName.Tag)) + log.Info("Ignoring unsupported GeneralNames tag", "tag", generalName.Tag, "subject", c.Subject) } } } diff --git a/operators/pkg/controller/common/finalizer/handler.go b/operators/pkg/controller/common/finalizer/handler.go index f09e12a7eb..4cdd959129 100644 --- a/operators/pkg/controller/common/finalizer/handler.go +++ b/operators/pkg/controller/common/finalizer/handler.go @@ -49,7 +49,7 @@ func (h *Handler) Handle(resource runtime.Object, finalizers ...Finalizer) error var finalizerErr error if metaObject.GetDeletionTimestamp().IsZero() { // resource is not being deleted, make sure all finalizers are there - needUpdate = h.reconcileFinalizers(finalizers, metaObject, resource) + needUpdate = h.reconcileFinalizers(finalizers, metaObject) } else { // resource is being deleted, let's execute finalizers needUpdate, finalizerErr = h.executeFinalizers(finalizers, metaObject, resource) @@ -62,15 +62,14 @@ func (h *Handler) Handle(resource runtime.Object, finalizers ...Finalizer) error return finalizerErr } -// ReconcileFinalizers makes sure all finalizers exist in the given objectMeta. -// If some finalizers need to be added to objectMeta, -// an update to the apiserver will be issued for the given resource. -func (h *Handler) reconcileFinalizers(finalizers []Finalizer, object metav1.Object, resource runtime.Object) bool { +// reconcileFinalizers ensures all finalizers exist in the given objectMeta. 
+// Returns a bool indicating whether an update of the object is required. +func (h *Handler) reconcileFinalizers(finalizers []Finalizer, object metav1.Object) bool { needUpdate := false for _, finalizer := range finalizers { // add finalizer if not already there if !stringsutil.StringInSlice(finalizer.Name, object.GetFinalizers()) { - log.Info("Registering finalizer", "name", finalizer.Name) + log.Info("Registering finalizer", "finalizer_name", finalizer.Name, "namespace", object.GetNamespace(), "name", object.GetName()) object.SetFinalizers(append(object.GetFinalizers(), finalizer.Name)) needUpdate = true } @@ -87,7 +86,7 @@ func (h *Handler) executeFinalizers(finalizers []Finalizer, object metav1.Object for _, finalizer := range finalizers { // for each registered finalizer, execute it, then remove from the list if stringsutil.StringInSlice(finalizer.Name, object.GetFinalizers()) { - log.Info("Executing finalizer", "name", finalizer.Name) + log.Info("Executing finalizer", "finalizer_name", finalizer.Name, "namespace", object.GetNamespace(), "name", object.GetName()) if finalizerErr = finalizer.Execute(); finalizerErr != nil { break } diff --git a/operators/pkg/controller/common/reconciler/reconciler.go b/operators/pkg/controller/common/reconciler/reconciler.go index 0bd129e7d3..3a3020ff4f 100644 --- a/operators/pkg/controller/common/reconciler/reconciler.go +++ b/operators/pkg/controller/common/reconciler/reconciler.go @@ -94,7 +94,7 @@ func ReconcileResource(params Params) error { err = params.Client.Get(types.NamespacedName{Name: name, Namespace: namespace}, params.Reconciled) if err != nil && apierrors.IsNotFound(err) { // Create if needed - log.Info(fmt.Sprintf("Creating %s %s/%s", kind, namespace, name)) + log.Info("Creating resource", "kind", kind, "namespace", namespace, "name", name) if params.PreCreate != nil { params.PreCreate() } @@ -118,7 +118,7 @@ func ReconcileResource(params Params) error { // Update if needed if params.NeedsUpdate() { - log.Info(fmt.Sprintf("Updating %s %s/%s ", kind, namespace, name)) + log.Info("Updating resource", "kind", kind, "namespace", namespace, "name", name) if params.PreUpdate != nil { params.PreUpdate() } diff --git a/operators/pkg/controller/common/reconciler/results.go b/operators/pkg/controller/common/reconciler/results.go index 9f2e7a7af9..218c22bc23 100644 --- a/operators/pkg/controller/common/reconciler/results.go +++ b/operators/pkg/controller/common/reconciler/results.go @@ -5,10 +5,7 @@ package reconciler import ( - "fmt" - k8serrors "k8s.io/apimachinery/pkg/util/errors" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -54,7 +51,7 @@ func (r *Results) WithResult(res reconcile.Result) *Results { func (r *Results) Apply(step string, recoverableStep func() (reconcile.Result, error)) *Results { result, err := recoverableStep() if err != nil { - log.Error(err, fmt.Sprintf("Error during %s, continuing", step)) + log.Error(err, "Recoverable error during step, continuing", "step", step) } return r.WithError(err).WithResult(result) } @@ -69,7 +66,7 @@ func (r *Results) Aggregate() (reconcile.Result, error) { current = next } } - log.Info(fmt.Sprintf("Aggregate reconcile result is %+v", current)) + log.Info("Aggregated reconciliation results complete", "result", current) return current, k8serrors.NewAggregate(r.errors) } diff --git a/operators/pkg/controller/common/settings/canonical_config.go b/operators/pkg/controller/common/settings/canonical_config.go index 0124c157f5..6a395fcf36 100644 ---
a/operators/pkg/controller/common/settings/canonical_config.go +++ b/operators/pkg/controller/common/settings/canonical_config.go @@ -30,9 +30,19 @@ func NewCanonicalConfig() *CanonicalConfig { return fromConfig(ucfg.New()) } -// NewCanonicalConfigFrom creates a new config from the API type. +// NewCanonicalConfigFrom creates a new config from the API type after normalizing the data. func NewCanonicalConfigFrom(data untypedDict) (*CanonicalConfig, error) { - config, err := ucfg.NewFrom(data, Options...) + // not great: round trip through yaml to normalize untyped dict before creating config + // to avoid numeric differences in configs due to JSON marshalling/deep copies being restricted to float + bytes, err := yaml.Marshal(data) + if err != nil { + return nil, err + } + var normalized untypedDict + if err := yaml.Unmarshal(bytes, &normalized); err != nil { + return nil, err + } + config, err := ucfg.NewFrom(normalized, Options...) if err != nil { return nil, err } diff --git a/operators/pkg/controller/common/settings/canonical_config_test.go b/operators/pkg/controller/common/settings/canonical_config_test.go index f6d43b917d..1d60816614 100644 --- a/operators/pkg/controller/common/settings/canonical_config_test.go +++ b/operators/pkg/controller/common/settings/canonical_config_test.go @@ -409,3 +409,42 @@ func TestCanonicalConfig_SetStrings(t *testing.T) { }) } } + +func TestNewCanonicalConfigFrom(t *testing.T) { + type args struct { + data untypedDict + } + tests := []struct { + name string + args args + want *CanonicalConfig + wantErr bool + }{ + { + name: "should normalize numeric types", + args: args{ + data: map[string]interface{}{ + "a": float64(1), // after json round trip or deep copy typically a float + "b": 1.2, + }, + }, + want: MustCanonicalConfig(map[string]interface{}{ + "a": 1, + "b": 1.2, + }), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewCanonicalConfigFrom(tt.args.data) + if (err != nil) != tt.wantErr { + t.Errorf("NewCanonicalConfigFrom() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := got.Diff(tt.want, nil); len(diff) > 0 { + t.Error(diff) + } + }) + } +} diff --git a/operators/pkg/controller/common/watches/expectations_watch.go b/operators/pkg/controller/common/watches/expectations_watch.go index 14eb57c8fb..9f91cecae5 100644 --- a/operators/pkg/controller/common/watches/expectations_watch.go +++ b/operators/pkg/controller/common/watches/expectations_watch.go @@ -54,7 +54,7 @@ func (p *ExpectationsWatch) Create(evt event.CreateEvent, q workqueue.RateLimiti resource, exists := p.resourceRetriever(evt.Meta) if exists { p.expectations.CreationObserved(resource) - log.V(4).Info("Marking creation observed in expectations", "resource", resource) + log.V(1).Info("Marking creation observed in expectations", "name", resource.Name, "namespace", resource.Namespace) } } @@ -63,7 +63,7 @@ func (p *ExpectationsWatch) Delete(evt event.DeleteEvent, q workqueue.RateLimiti resource, exists := p.resourceRetriever(evt.Meta) if exists { p.expectations.DeletionObserved(resource) - log.V(4).Info("Marking deletion observed in expectations", "resource", resource) + log.V(1).Info("Marking deletion observed in expectations", "name", resource.Name, "namespace", resource.Namespace) } } diff --git a/operators/pkg/controller/common/watches/handler.go b/operators/pkg/controller/common/watches/handler.go index 3c0fd6dad7..e782173d44 100644 --- a/operators/pkg/controller/common/watches/handler.go +++ 
b/operators/pkg/controller/common/watches/handler.go @@ -63,7 +63,7 @@ func (d *DynamicEnqueueRequest) AddHandler(handler HandlerRegistration) error { defer d.mutex.Unlock() inject.SchemeInto(d.scheme, handler) d.registrations[handler.Key()] = handler - log.V(4).Info("Added new handler registration", "Now", d.registrations) + log.V(1).Info("Added new handler registration", "current_registrations", d.registrations) return nil } @@ -77,7 +77,7 @@ func (d *DynamicEnqueueRequest) RemoveHandlerForKey(key string) { d.mutex.Lock() defer d.mutex.Unlock() delete(d.registrations, key) - log.V(4).Info("Removed handler registration", "removed", key, "now", d.registrations) + log.V(1).Info("Removed handler registration", "removed", key, "current_registrations", d.registrations) } // Registrations returns the list of registered handler names. diff --git a/operators/pkg/controller/common/watches/named_watch.go b/operators/pkg/controller/common/watches/named_watch.go index f9705d2fa9..4103f971ef 100644 --- a/operators/pkg/controller/common/watches/named_watch.go +++ b/operators/pkg/controller/common/watches/named_watch.go @@ -28,32 +28,32 @@ var _ handler.EventHandler = &NamedWatch{} func (w NamedWatch) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { for _, req := range w.toReconcileRequest(evt.Meta) { - log.V(4).Info("Create event transformed", "key", w.Key()) + log.V(1).Info("Create event transformed", "key", w.Key()) q.Add(req) } } func (w NamedWatch) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { for _, req := range w.toReconcileRequest(evt.MetaOld) { - log.V(4).Info("Update event transformed (old)", "key", w.Key()) + log.V(1).Info("Update event transformed (old)", "key", w.Key()) q.Add(req) } for _, req := range w.toReconcileRequest(evt.MetaNew) { - log.V(4).Info("Update event transformed (new)", "key", w.Key()) + log.V(1).Info("Update event transformed (new)", "key", w.Key()) q.Add(req) } } func (w NamedWatch) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { for _, req := range w.toReconcileRequest(evt.Meta) { - log.V(4).Info("Delete event transformed", "key", w.Key()) + log.V(1).Info("Delete event transformed", "key", w.Key()) q.Add(req) } } func (w NamedWatch) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { for _, req := range w.toReconcileRequest(evt.Meta) { - log.V(4).Info("Generic event transformed", "key", w.Key()) + log.V(1).Info("Generic event transformed", "key", w.Key()) q.Add(req) } } diff --git a/operators/pkg/controller/elasticsearch/certificates/transport/reconcile.go b/operators/pkg/controller/elasticsearch/certificates/transport/reconcile.go index bb7601ac9d..a0a6a4d31d 100644 --- a/operators/pkg/controller/elasticsearch/certificates/transport/reconcile.go +++ b/operators/pkg/controller/elasticsearch/certificates/transport/reconcile.go @@ -37,7 +37,7 @@ func ReconcileTransportCertificatesSecrets( services []corev1.Service, rotationParams certificates.RotationParams, ) (reconcile.Result, error) { - log.Info("Reconciling transport certificates secrets") + log.Info("Reconciling transport certificate secrets", "namespace", es.Namespace, "es_name", es.Name) var pods corev1.PodList if err := c.List(&client.ListOptions{ @@ -56,7 +56,7 @@ func ReconcileTransportCertificatesSecrets( for _, pod := range pods.Items { if pod.Status.PodIP == "" { - log.Info("Skipping pod because it has no IP yet", "pod", pod.Name) + log.Info("Skipping pod because it has no IP yet", "namespace", pod.Namespace, "pod_name", pod.Name) 
continue } @@ -85,7 +85,7 @@ func ReconcileTransportCertificatesSecrets( } } if len(keysToPrune) > 0 { - log.Info("Pruning keys from certificates secret", "keys", keysToPrune) + log.Info("Pruning keys from certificates secret", "namespace", es.Namespace, "secret_name", secret.Name, "keys", keysToPrune) for _, keyToRemove := range keysToPrune { delete(secret.Data, keyToRemove) diff --git a/operators/pkg/controller/elasticsearch/driver/default.go b/operators/pkg/controller/elasticsearch/driver/default.go index b162617920..0557e25725 100644 --- a/operators/pkg/controller/elasticsearch/driver/default.go +++ b/operators/pkg/controller/elasticsearch/driver/default.go @@ -176,6 +176,8 @@ func (d *defaultDriver) Reconcile( min = &d.Version } + warnUnsupportedDistro(resourcesState.AllPods, reconcileState.Recorder) + observedState := d.observedStateResolver( k8s.ExtractNamespacedName(&es), certificateResources.TrustedHTTPCertificates, @@ -384,7 +386,7 @@ func (d *defaultDriver) Reconcile( // cannot be reached, hence we cannot delete pods. // Probably it was just created and is not ready yet. // Let's retry in a while. - log.Info("ES external service not ready yet for shard migration reconciliation. Requeuing.") + log.Info("ES external service not ready yet for shard migration reconciliation. Requeuing.", "namespace", es.Namespace, "es_name", es.Name) reconcileState.UpdateElasticsearchPending(resourcesState.CurrentPods.Pods()) @@ -621,7 +623,7 @@ func (d *defaultDriver) scaleStatefulSetDown( if sset.Replicas(*statefulSet) == 0 && targetReplicas == 0 { // we don't expect any new replicas in this statefulset, remove it - logger.Info("Deleting statefulset") + logger.Info("Deleting statefulset", "namespace", statefulSet.Namespace, "name", statefulSet.Name) if err := d.Client.Delete(statefulSet); err != nil { return results.WithError(err) } @@ -735,3 +737,18 @@ func reconcileScriptsConfigMap(c k8s.Client, scheme *runtime.Scheme, es v1alpha1 return nil } + +// warnUnsupportedDistro emits a warning event if the Elasticsearch Docker image is not a supported +// distribution, by checking whether the prepare-fs init container terminated with the UnsupportedDistro exit code. +func warnUnsupportedDistro(pods []corev1.Pod, recorder *events.Recorder) { + for _, p := range pods { + for _, s := range p.Status.InitContainerStatuses { + state := s.LastTerminationState.Terminated + if s.Name == initcontainer.PrepareFilesystemContainerName && + state != nil && state.ExitCode == initcontainer.UnsupportedDistroExitCode { + recorder.AddEvent(corev1.EventTypeWarning, events.EventReasonUnexpected, + "Unsupported distribution") + } + } + } +} diff --git a/operators/pkg/controller/elasticsearch/elasticsearch_controller.go b/operators/pkg/controller/elasticsearch/elasticsearch_controller.go index a863144ff6..002075b076 100644 --- a/operators/pkg/controller/elasticsearch/elasticsearch_controller.go +++ b/operators/pkg/controller/elasticsearch/elasticsearch_controller.go @@ -5,7 +5,6 @@ package elasticsearch import ( - "fmt" "sync/atomic" "time" @@ -181,9 +180,9 @@ func (r *ReconcileElasticsearch) Reconcile(request reconcile.Request) (reconcile // atomically update the iteration to support concurrent runs.
currentIteration := atomic.AddInt64(&r.iteration, 1) iterationStartTime := time.Now() - log.Info("Start reconcile iteration", "iteration", currentIteration, "request", request) + log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "es_name", request.Name) defer func() { - log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "request", request) + log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "es_name", request.Name) }() // Fetch the Elasticsearch instance @@ -200,15 +199,16 @@ } if common.IsPaused(es.ObjectMeta) { - log.Info("Paused : skipping reconciliation", "iteration", currentIteration) + log.Info("Object is paused. Skipping reconciliation", "namespace", es.Namespace, "es_name", es.Name, "iteration", currentIteration) return common.PauseRequeue, nil } state := esreconcile.NewState(es) + state.UpdateElasticsearchControllerVersion(r.OperatorInfo.BuildInfo.Version) results := r.internalReconcile(es, state) err = r.updateStatus(es, state) if err != nil && apierrors.IsConflict(err) { - log.V(1).Info("Conflict while updating status") + log.V(1).Info("Conflict while updating status", "namespace", es.Namespace, "es_name", es.Name) return reconcile.Result{Requeue: true}, nil } return results.WithError(err).Aggregate() @@ -266,10 +266,10 @@ func (r *ReconcileElasticsearch) updateStatus( es elasticsearchv1alpha1.Elasticsearch, reconcileState *esreconcile.State, ) error { - log.Info("Updating status", "iteration", atomic.LoadInt64(&r.iteration)) + log.Info("Updating status", "iteration", atomic.LoadInt64(&r.iteration), "namespace", es.Namespace, "es_name", es.Name) events, cluster := reconcileState.Apply() for _, evt := range events { - log.Info(fmt.Sprintf("Recording event %+v", evt)) + log.V(1).Info("Recording event", "event", evt) r.recorder.Event(&es, evt.EventType, evt.Reason, evt.Message) } if cluster == nil { diff --git a/operators/pkg/controller/elasticsearch/initcontainer/initcontainer.go b/operators/pkg/controller/elasticsearch/initcontainer/initcontainer.go index 639100a7f5..3dcb682d48 100644 --- a/operators/pkg/controller/elasticsearch/initcontainer/initcontainer.go +++ b/operators/pkg/controller/elasticsearch/initcontainer/initcontainer.go @@ -18,7 +18,7 @@ const ( // osSettingsContainerName is the name of the container that tweaks os-level settings osSettingsContainerName = "elastic-internal-init-os-settings" // prepareFilesystemContainerName is the name of the container that prepares the filesystem - prepareFilesystemContainerName = "elastic-internal-init-filesystem" + PrepareFilesystemContainerName = "elastic-internal-init-filesystem" ) // NewInitContainers creates init containers according to the given parameters diff --git a/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs.go b/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs.go index 73496ee39a..3250a57267 100644 --- a/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs.go +++ b/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs.go @@ -103,7 +103,7 @@ func NewPrepareFSInitContainer( container := corev1.Container{ Image: imageName, ImagePullPolicy: corev1.PullIfNotPresent, - Name: prepareFilesystemContainerName, + Name: PrepareFilesystemContainerName, SecurityContext: &corev1.SecurityContext{ Privileged:
&privileged, }, diff --git a/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go b/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go index e2a277636c..58642185dc 100644 --- a/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go +++ b/operators/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go @@ -6,6 +6,7 @@ package initcontainer import ( "bytes" + "fmt" "html/template" ) @@ -43,7 +44,10 @@ func RenderScriptTemplate(params TemplateParams) (string, error) { return tplBuffer.String(), nil } -const PrepareFsScriptConfigKey = "prepare-fs.sh" +const ( + PrepareFsScriptConfigKey = "prepare-fs.sh" + UnsupportedDistroExitCode = 42 +) // scriptTemplate is the main script to be run // in the prepare-fs init container before ES starts @@ -52,6 +56,13 @@ var scriptTemplate = template.Must(template.New("").Parse( set -eu + # the operator only works with the default ES distribution + license=/usr/share/elasticsearch/LICENSE.txt + if [[ ! -f $license || $(grep -Fxc "ELASTIC LICENSE AGREEMENT" $license) -ne 1 ]]; then + >&2 echo "unsupported_distribution" + exit ` + fmt.Sprintf("%d", UnsupportedDistroExitCode) + ` + fi + # compute time in seconds since the given start time function duration() { local start=$1 diff --git a/operators/pkg/controller/elasticsearch/keystore/updater.go b/operators/pkg/controller/elasticsearch/keystore/updater.go index 730456603c..aaa4e5f680 100644 --- a/operators/pkg/controller/elasticsearch/keystore/updater.go +++ b/operators/pkg/controller/elasticsearch/keystore/updater.go @@ -167,7 +167,7 @@ func (u *Updater) updateKeystore() (error, string) { for _, file := range fileInfos { if strings.HasPrefix(file.Name(), ".") { - log.Info(fmt.Sprintf("Ignoring %s", file.Name())) + log.Info("Ignoring file", "file_name", file.Name()) continue } diff --git a/operators/pkg/controller/elasticsearch/mutation/change_group.go b/operators/pkg/controller/elasticsearch/mutation/change_group.go index 4893cb183f..08e6922be0 100644 --- a/operators/pkg/controller/elasticsearch/mutation/change_group.go +++ b/operators/pkg/controller/elasticsearch/mutation/change_group.go @@ -90,16 +90,11 @@ func (s ChangeGroup) calculatePerformableChanges( ) error { changeStats := s.ChangeStats() - log.V(3).Info( + log.V(1).Info( "Calculating performable changes for group", "group_name", s.Name, "change_stats", changeStats, "pods_state_status", s.PodsState.Status(), - ) - - log.V(4).Info( - "Calculating performable changes for group", - "group_name", s.Name, "pods_state_summary", s.PodsState.Summary(), ) @@ -144,9 +139,10 @@ func (s ChangeGroup) calculatePerformableChanges( // schedule for creation as many pods as we can for _, newPodToCreate := range s.Changes.ToCreate { if changeStats.CurrentSurge >= maxSurge { - log.V(4).Info( + log.V(1).Info( "Hit the max surge limit in a group.", "group_name", s.Name, + "namespace", newPodToCreate.Pod.Namespace, "change_stats", changeStats, ) result.MaxSurgeGroups = append(result.MaxSurgeGroups, s.Name) @@ -160,6 +156,8 @@ func (s ChangeGroup) calculatePerformableChanges( "Scheduling a pod for creation", "group_name", s.Name, "change_stats", changeStats, + "pod_name", newPodToCreate.Pod.Name, + "namespace", newPodToCreate.Pod.Namespace, "mismatch_reasons", newPodToCreate.MismatchReasons, ) @@ -181,7 +179,7 @@ func (s ChangeGroup) calculatePerformableChanges( } if changeStats.CurrentUnavailable >= maxUnavailable { - log.V(4).Info( + log.V(1).Info( "Hit the max unavailable limit in a group.", 
"group_name", s.Name, "change_stats", changeStats, @@ -194,9 +192,11 @@ func (s ChangeGroup) calculatePerformableChanges( changeStats.CurrentUnavailable++ changeStats.CurrentRunningReadyPods-- - log.V(4).Info( + log.V(1).Info( "Scheduling a pod for deletion", "group_name", s.Name, + "pod_name", pod.Pod.Name, + "namespace", pod.Pod.Namespace, "change_stats", changeStats, ) diff --git a/operators/pkg/controller/elasticsearch/mutation/performable.go b/operators/pkg/controller/elasticsearch/mutation/performable.go index 223d217a1d..a3563b26b8 100644 --- a/operators/pkg/controller/elasticsearch/mutation/performable.go +++ b/operators/pkg/controller/elasticsearch/mutation/performable.go @@ -59,7 +59,7 @@ func CalculatePerformableChanges( if err != nil { return nil, err } - log.V(3).Info("Created change groups", "count", len(changeGroups)) + log.V(1).Info("Created change groups", "name", AllGroupName, "count", len(changeGroups)) podRestrictions := NewPodRestrictions(allPodsState) diff --git a/operators/pkg/controller/elasticsearch/observer/manager.go b/operators/pkg/controller/elasticsearch/observer/manager.go index f847f5f36c..df2a1054e8 100644 --- a/operators/pkg/controller/elasticsearch/observer/manager.go +++ b/operators/pkg/controller/elasticsearch/observer/manager.go @@ -53,7 +53,7 @@ func (m *Manager) Observe(cluster types.NamespacedName, caCerts []*x509.Certific case !exists: return m.createObserver(cluster, caCerts, esClient) case exists && (!observer.esClient.Equal(esClient) || !reflect.DeepEqual(observer.caCerts, caCerts)): - log.Info("Replacing observer HTTP client", "cluster", cluster) + log.Info("Replacing observer HTTP client", "namespace", cluster.Namespace, "es_name", cluster.Name) m.StopObserving(cluster) return m.createObserver(cluster, caCerts, esClient) default: diff --git a/operators/pkg/controller/elasticsearch/observer/observer.go b/operators/pkg/controller/elasticsearch/observer/observer.go index 1aaa2d99fd..eb28afb3da 100644 --- a/operators/pkg/controller/elasticsearch/observer/observer.go +++ b/operators/pkg/controller/elasticsearch/observer/observer.go @@ -93,7 +93,7 @@ func NewObserver( } observer.pmClientFactory = observer.createProcessManagerClient - log.Info("Creating observer", "cluster", cluster) + log.Info("Creating observer for cluster", "namespace", cluster.Namespace, "es_name", cluster.Name) return &observer } @@ -140,7 +140,7 @@ func (o *Observer) runPeriodically(ctx context.Context) { case <-ticker.C: o.retrieveState(ctx) case <-ctx.Done(): - log.Info("Stopping observer", "cluster", o.cluster) + log.Info("Stopping observer for cluster", "namespace", o.cluster.Namespace, "es_name", o.cluster.Name) return } } @@ -149,7 +149,7 @@ func (o *Observer) runPeriodically(ctx context.Context) { // retrieveState retrieves the current ES state, executes onObservation, // and stores the new state func (o *Observer) retrieveState(ctx context.Context) { - log.V(4).Info("Retrieving state", "cluster", o.cluster) + log.V(1).Info("Retrieving cluster state", "es_name", o.cluster.Name, "namespace", o.cluster.Namespace) timeoutCtx, cancel := context.WithTimeout(ctx, o.settings.RequestTimeout) defer cancel() diff --git a/operators/pkg/controller/elasticsearch/observer/state.go b/operators/pkg/controller/elasticsearch/observer/state.go index 8342f24fb4..ecf2931584 100644 --- a/operators/pkg/controller/elasticsearch/observer/state.go +++ b/operators/pkg/controller/elasticsearch/observer/state.go @@ -47,7 +47,8 @@ func RetrieveState( go func() { clusterState, err := 
esClient.GetClusterState(ctx) if err != nil { - log.V(3).Info("Unable to retrieve cluster state", "error", err) + // This is expected to happen from time to time + log.V(1).Info("Unable to retrieve cluster state", "error", err, "namespace", cluster.Namespace, "es_name", cluster.Name) clusterStateChan <- nil return } @@ -57,7 +58,7 @@ func RetrieveState( go func() { health, err := esClient.GetClusterHealth(ctx) if err != nil { - log.V(3).Info("Unable to retrieve cluster health", "error", err) + log.V(1).Info("Unable to retrieve cluster health", "error", err, "namespace", cluster.Namespace, "es_name", cluster.Name) healthChan <- nil return } @@ -67,7 +68,7 @@ func RetrieveState( go func() { license, err := esClient.GetLicense(ctx) if err != nil { - log.V(3).Info("Unable to retrieve cluster license", "error", err) + log.V(1).Info("Unable to retrieve cluster license", "error", err, "namespace", cluster.Namespace, "es_name", cluster.Name) licenseChan <- nil return } @@ -109,7 +110,7 @@ func RetrieveState( func getKeystoreStatus(ctx context.Context, pmClientFactory pmClientFactory, pod corev1.Pod) keystore.Status { if !k8s.IsPodReady(pod) { - log.V(3).Info("Pod not ready to retrieve keystore status", "pod_name", pod.Name) + log.V(1).Info("Pod not ready to retrieve keystore status", "namespace", pod.Namespace, "pod_name", pod.Name) return keystore.Status{State: keystore.WaitingState, Reason: "Pod not ready"} } @@ -117,10 +118,10 @@ func getKeystoreStatus(ctx context.Context, pmClientFactory pmClientFactory, pod defer client.Close() status, err := client.KeystoreStatus(ctx) if err != nil { - log.V(3).Info("Unable to retrieve keystore status", "pod_name", pod.Name, "error", err) + log.Error(err, "Unable to retrieve keystore status", "namespace", pod.Namespace, "pod_name", pod.Name) return keystore.Status{State: keystore.FailedState, Reason: "Unable to retrieve keystore status"} } - log.V(3).Info("Keystore updater", "pod_name", pod.Name, "status", status) + log.V(1).Info("Keystore status retrieved successfully", "namespace", pod.Namespace, "pod_name", pod.Name, "status", status) return status } diff --git a/operators/pkg/controller/elasticsearch/processmanager/process.go b/operators/pkg/controller/elasticsearch/processmanager/process.go index 8730b64642..3e338d1ec8 100644 --- a/operators/pkg/controller/elasticsearch/processmanager/process.go +++ b/operators/pkg/controller/elasticsearch/processmanager/process.go @@ -216,7 +216,7 @@ func exitCode(err error) int { exitCode = waitStatus.ExitStatus() } } else { - log.Info("Failed to terminate process", "err", err.Error()) + log.Error(err, "Failed to terminate process") exitCode = 1 } } diff --git a/operators/pkg/controller/elasticsearch/processmanager/state.go b/operators/pkg/controller/elasticsearch/processmanager/state.go index 9b4492f955..d4f8294f56 100644 --- a/operators/pkg/controller/elasticsearch/processmanager/state.go +++ b/operators/pkg/controller/elasticsearch/processmanager/state.go @@ -101,6 +101,7 @@ func (p *Process) updateState(action string, signal syscall.Signal, lastErr erro err := p.state.Write() if err != nil { + log.Error(err, "Failed to write process state, exiting") Exit(fmt.Sprintf("Failed to write process state: %s", err), 1) } diff --git a/operators/pkg/controller/elasticsearch/reconcile/state.go b/operators/pkg/controller/elasticsearch/reconcile/state.go index b02db91547..85c5bfc738 100644 --- a/operators/pkg/controller/elasticsearch/reconcile/state.go +++ b/operators/pkg/controller/elasticsearch/reconcile/state.go @@ -157,3 
+157,13 @@ func (s *State) UpdateElasticsearchInvalid(results []validation.Result) { s.AddEvent(corev1.EventTypeWarning, events.EventReasonValidation, r.Reason) } } + +// UpdateElasticsearchControllerVersion sets the elasticsearch controller version that last updated the ES cluster +func (s *State) UpdateElasticsearchControllerVersion(version string) { + s.status.ControllerVersion = version +} + +// GetElasticsearchControllerVersion returns the elasticsearch controller version that last updated the ES cluster +func (s *State) GetElasticsearchControllerVersion() string { + return s.status.ControllerVersion +} diff --git a/operators/pkg/controller/elasticsearch/restart/annotations.go b/operators/pkg/controller/elasticsearch/restart/annotations.go index 0cebd30426..b14aa758aa 100644 --- a/operators/pkg/controller/elasticsearch/restart/annotations.go +++ b/operators/pkg/controller/elasticsearch/restart/annotations.go @@ -82,7 +82,8 @@ func isAnnotatedForRestart(pod corev1.Pod) bool { func setPhase(client k8s.Client, pod corev1.Pod, phase Phase) error { log.V(1).Info( "Setting restart phase", - "pod", pod.Name, + "namespace", pod.Namespace, + "pod_name", pod.Name, "phase", phase, ) if pod.Annotations == nil { @@ -116,7 +117,8 @@ func getStrategy(pod corev1.Pod) Strategy { func setScheduleRestartAnnotations(client k8s.Client, pod corev1.Pod, strategy Strategy, startTime time.Time) error { log.V(1).Info( "Scheduling restart", - "pod", pod.Name, + "namespace", pod.Namespace, + "pod_name", pod.Name, "strategy", strategy, ) if pod.Annotations == nil { diff --git a/operators/pkg/controller/elasticsearch/restart/coordinated.go b/operators/pkg/controller/elasticsearch/restart/coordinated.go index 489b3f2cf1..7f062ef444 100644 --- a/operators/pkg/controller/elasticsearch/restart/coordinated.go +++ b/operators/pkg/controller/elasticsearch/restart/coordinated.go @@ -189,7 +189,7 @@ func (c *CoordinatedRestart) start() Step { } if podsDone != len(pods) { - log.V(1).Info("Some pods are not started yet", "expected", len(pods), "actual", podsDone) + log.V(1).Info("Some pods are not started yet", "namespace", c.Cluster.Namespace, "es_name", c.Cluster.Name, "expected", len(pods), "actual", podsDone) return false, nil // requeue } @@ -202,12 +202,12 @@ func (c *CoordinatedRestart) start() Step { return false, err } if !esReachable { - log.V(1).Info("Cluster is not ready to receive requests yet") + log.V(1).Info("Cluster is not ready to receive requests yet", "namespace", c.Cluster.Namespace, "es_name", c.Cluster.Name) return false, nil // requeue } // re-enable shard allocation - log.V(1).Info("Enabling shards allocation") + log.V(1).Info("Enabling shards allocation", "namespace", c.Cluster.Namespace, "es_name", c.Cluster.Name) ctx, cancel := context.WithTimeout(context.Background(), client.DefaultReqTimeout) defer cancel() if err := c.EsClient.EnableShardAllocation(ctx); err != nil { @@ -225,7 +225,7 @@ func (c *CoordinatedRestart) start() Step { corev1.EventTypeNormal, events.EventReasonRestart, fmt.Sprintf("Coordinated restart complete for cluster %s", c.Cluster.Name), ) - log.Info("Coordinated restart complete", "cluster", c.Cluster.Name) + log.Info("Coordinated restart complete", "es_name", c.Cluster.Name) return true, nil }, diff --git a/operators/pkg/controller/elasticsearch/restart/process_manager.go b/operators/pkg/controller/elasticsearch/restart/process_manager.go index b9791b5560..006594b56e 100644 --- a/operators/pkg/controller/elasticsearch/restart/process_manager.go +++ 
b/operators/pkg/controller/elasticsearch/restart/process_manager.go @@ -51,7 +51,7 @@ func ensureESProcessStopped(pmClient processmanager.Client, podName string) (boo // request ES process stop (idempotent) ctx, cancel := context.WithTimeout(context.Background(), processmanager.DefaultReqTimeout) defer cancel() - log.V(1).Info("Requesting ES process stop", "pod", podName) + log.V(1).Info("Requesting ES process stop", "pod_name", podName) status, err := pmClient.Stop(ctx) if err != nil { return false, err @@ -59,12 +59,12 @@ func ensureESProcessStopped(pmClient processmanager.Client, podName string) (boo // we got the current status back, check if the process is stopped if status.State != processmanager.Stopped { - log.V(1).Info("ES process is not stopped yet", "pod", podName, "state", status.State) + log.V(1).Info("ES process is not stopped yet", "pod_name", podName, "state", status.State) // not stopped yet, requeue return false, nil } - log.V(1).Info("ES process successfully stopped", "pod", podName) + log.V(1).Info("ES process successfully stopped", "pod_name", podName) return true, nil } @@ -73,7 +73,7 @@ func ensureESProcessStarted(pmClient processmanager.Client, podName string) (boo // request ES process start (idempotent) ctx, cancel := context.WithTimeout(context.Background(), processmanager.DefaultReqTimeout) defer cancel() - log.V(1).Info("Requesting ES process start", "pod", podName) + log.V(1).Info("Requesting ES process start", "pod_name", podName) status, err := pmClient.Start(ctx) if err != nil { return false, err @@ -81,11 +81,11 @@ func ensureESProcessStarted(pmClient processmanager.Client, podName string) (boo // we got the current status back, check if the process is started if status.State != processmanager.Started { - log.V(1).Info("ES process is not started yet", "pod", podName, "state", status.State) + log.V(1).Info("ES process is not started yet", "pod_name", podName, "state", status.State) // not started yet, requeue return false, nil } - log.V(1).Info("ES process successfully started", "pod", podName) + log.V(1).Info("ES process successfully started", "pod_name", podName) return true, nil } diff --git a/operators/pkg/controller/elasticsearch/restart/restart.go b/operators/pkg/controller/elasticsearch/restart/restart.go index 046bf6b40e..495c4d607e 100644 --- a/operators/pkg/controller/elasticsearch/restart/restart.go +++ b/operators/pkg/controller/elasticsearch/restart/restart.go @@ -80,7 +80,7 @@ func processOngoingRestarts(restartContext RestartContext) (done bool, err error return true, nil } - log.V(1).Info("Pods annotated for restart") + log.V(1).Info("Pods annotated for restart", "namespace", restartContext.Cluster.Namespace, "es_name", restartContext.Cluster.Name) if len(annotatedPods[StrategyCoordinated]) > 0 { // run the coordinated restart @@ -129,7 +129,7 @@ func schedulePodsRestart(c k8s.Client, pods pod.PodsWithConfig, strategy Strateg count := 0 for _, p := range pods { if isAnnotatedForRestart(p.Pod) { - log.V(1).Info("Pod already in a restart phase", "pod", p.Pod.Name) + log.V(1).Info("Pod already in a restart phase", "namespace", p.Pod.Namespace, "pod_name", p.Pod.Name) continue } if err := setScheduleRestartAnnotations(c, p.Pod, strategy, time.Now()); err != nil { diff --git a/operators/pkg/controller/elasticsearch/settings/masters.go b/operators/pkg/controller/elasticsearch/settings/masters.go index 00fb940431..81ad7b7783 100644 --- a/operators/pkg/controller/elasticsearch/settings/masters.go +++ 
b/operators/pkg/controller/elasticsearch/settings/masters.go @@ -88,10 +88,10 @@ func UpdateSeedHostsConfigMap( reconciled.Data = expected.Data }, PreCreate: func() { - log.Info("Creating seed hosts", "hosts", seedHosts) + log.Info("Creating seed hosts", "namespace", es.Namespace, "es_name", es.Name, "hosts", seedHosts) }, PostUpdate: func() { - log.Info("Seed hosts updated", "hosts", seedHosts) + log.Info("Seed hosts updated", "namespace", es.Namespace, "es_name", es.Name, "hosts", seedHosts) annotation.MarkPodsAsUpdated(c, client.ListOptions{ Namespace: es.Namespace, diff --git a/operators/pkg/controller/elasticsearch/settings/secure_settings.go b/operators/pkg/controller/elasticsearch/settings/secure_settings.go index 9506062d83..9307fdfb25 100644 --- a/operators/pkg/controller/elasticsearch/settings/secure_settings.go +++ b/operators/pkg/controller/elasticsearch/settings/secure_settings.go @@ -84,7 +84,7 @@ func retrieveUserSecret(c k8s.Client, eventsRecorder *events.Recorder, namespace err := c.Get(types.NamespacedName{Namespace: namespace, Name: name}, &userSecret) if err != nil && apierrors.IsNotFound(err) { msg := "Secure settings secret not found" - log.Info(msg, "name", name) + log.Info(msg, "namespace", namespace, "secret_name", name) eventsRecorder.AddEvent(corev1.EventTypeWarning, events.EventReasonUnexpected, msg+": "+name) } else if err != nil { return nil, err diff --git a/operators/pkg/controller/elasticsearch/version/version6/zen1.go b/operators/pkg/controller/elasticsearch/version/version6/zen1.go index 6bc4acb22f..1a9b5426d9 100644 --- a/operators/pkg/controller/elasticsearch/version/version6/zen1.go +++ b/operators/pkg/controller/elasticsearch/version/version6/zen1.go @@ -61,6 +61,8 @@ func UpdateZen1Discovery( // Update the current value in the configuration of existing pods log.V(1).Info("Set minimum master nodes", "how", "configuration", + "namespace", cluster.Namespace, + "es_name", cluster.Name, "currentMasterCount", currentMasterCount, "nextMasterCount", nextMasterCount, "minimum_master_nodes", minimumMasterNodes, @@ -106,7 +108,10 @@ func UpdateZen1Discovery( // Do not attempt to make an API call if there is not enough available masters if currentAvailableMasterCount < minimumMasterNodes { + // This is expected to happen from time to time log.V(1).Info("Not enough masters to update the API", + "namespace", cluster.Namespace, + "es_name", cluster.Name, "current", currentAvailableMasterCount, "required", minimumMasterNodes) // We can't update the minimum master nodes right now, it is the case if a new master node is not created yet. 
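For context on the guard above: zen1's minimum_master_nodes is meant to be a strict majority of master-eligible nodes, conventionally computed as n/2 + 1 with integer division. A minimal sketch of that quorum rule (the standalone helper below is illustrative only, not the operator's code):

package main

import "fmt"

// quorum returns the zen1 minimum_master_nodes value for n master-eligible
// nodes: a strict majority, i.e. n/2 + 1 using integer division.
func quorum(n int) int {
	return n/2 + 1
}

func main() {
	// 1 master needs itself, 3 masters need 2, 5 masters need 3.
	for _, n := range []int{1, 3, 5} {
		fmt.Printf("masters=%d minimum_master_nodes=%d\n", n, quorum(n))
	}
}
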
@@ -114,8 +119,10 @@ func UpdateZen1Discovery( return true, nil } - log.Info("Update minimum master nodes", + log.Info("Updating minimum master nodes", "how", "api", + "namespace", cluster.Namespace, + "es_name", cluster.Name, "currentMasterCount", currentMasterCount, "nextMasterCount", nextMasterCount, "minimum_master_nodes", minimumMasterNodes, diff --git a/operators/pkg/controller/kibana/config/settings.go b/operators/pkg/controller/kibana/config/settings.go index 557649bc9d..705c7232d2 100644 --- a/operators/pkg/controller/kibana/config/settings.go +++ b/operators/pkg/controller/kibana/config/settings.go @@ -9,6 +9,7 @@ import ( commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/settings" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/es" @@ -37,7 +38,7 @@ func NewConfigSettings(client k8s.Client, kb v1alpha1.Kibana) (CanonicalConfig, return CanonicalConfig{}, err } - esAuthSettings, err := elasticsearchAuthSettings(client, kb) + username, password, err := association.ElasticsearchAuthSettings(client, &kb) if err != nil { return CanonicalConfig{}, err } @@ -48,7 +49,12 @@ func NewConfigSettings(client k8s.Client, kb v1alpha1.Kibana) (CanonicalConfig, err = cfg.MergeWith( settings.MustCanonicalConfig(kibanaTLSSettings(kb)), settings.MustCanonicalConfig(elasticsearchTLSSettings(kb)), - settings.MustCanonicalConfig(esAuthSettings), + settings.MustCanonicalConfig( + map[string]interface{}{ + ElasticsearchUsername: username, + ElasticsearchPassword: password, + }, + ), userSettings, ) if err != nil { @@ -85,25 +91,3 @@ func elasticsearchTLSSettings(kb v1alpha1.Kibana) map[string]interface{} { ElasticsearchSslVerificationMode: "certificate", } } - -func elasticsearchAuthSettings(client k8s.Client, kb v1alpha1.Kibana) (map[string]interface{}, error) { - authSettings := map[string]interface{}{} - auth := kb.Spec.Elasticsearch.Auth - if auth.Inline != nil { - authSettings = map[string]interface{}{ - ElasticsearchUsername: auth.Inline.Username, - ElasticsearchPassword: auth.Inline.Password, - } - } - if auth.SecretKeyRef != nil { - secret, err := es.GetAuthSecret(client, kb) - if err != nil { - return nil, err - } - authSettings = map[string]interface{}{ - ElasticsearchUsername: auth.SecretKeyRef.Key, - ElasticsearchPassword: string(secret.Data[auth.SecretKeyRef.Key]), - } - } - return authSettings, nil -} diff --git a/operators/pkg/controller/kibana/config/settings_test.go b/operators/pkg/controller/kibana/config/settings_test.go index 63e26974c8..a9a0807f5c 100644 --- a/operators/pkg/controller/kibana/config/settings_test.go +++ b/operators/pkg/controller/kibana/config/settings_test.go @@ -21,6 +21,8 @@ var defaultConfig = []byte(` elasticsearch: hosts: - "" + username: "" + password: "" ssl: certificateAuthorities: /usr/share/kibana/config/elasticsearch-certs/tls.crt verificationMode: certificate diff --git a/operators/pkg/controller/kibana/driver.go b/operators/pkg/controller/kibana/driver.go index d3a558e904..a2b96faa48 100644 --- a/operators/pkg/controller/kibana/driver.go +++ b/operators/pkg/controller/kibana/driver.go @@ -10,6 +10,7 @@ import ( kbtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" 
"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/finalizer" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" @@ -23,9 +24,9 @@ import ( "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/label" kbname "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/name" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/pod" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/securesettings" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/version/version6" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/version/version7" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/volume" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -33,6 +34,14 @@ import ( "k8s.io/client-go/tools/record" ) +// initContainersParameters is used to generate the init container that will load the secure settings into a keystore +var initContainersParameters = keystore.InitContainerParameters{ + KeystoreCreateCommand: "/usr/share/kibana/bin/kibana-keystore create", + KeystoreAddCommand: "/usr/share/kibana/bin/kibana-keystore add", + SecureSettingsVolumeMountPath: keystore.SecureSettingsVolumeMountPath, + DataVolumePath: volume.DataVolumeMountPath, +} + type driver struct { client k8s.Client scheme *runtime.Scheme @@ -57,18 +66,27 @@ func secretWatchFinalizer(kibana kbtype.Kibana, watches watches.DynamicWatches) func (d *driver) deploymentParams(kb *kbtype.Kibana) (*DeploymentParams, error) { // setup a keystore with secure settings in an init container, if specified by the user - volumes, initContainers, secureSettingsVersion, err := securesettings.Resources(d.client, d.recorder, d.dynamicWatches, *kb) + //volumes, initContainers, secureSettingsVersion, err := securesettings.Resources(d.client, d.recorder, d.dynamicWatches, *kb) + keystoreResources, err := keystore.NewResources( + d.client, + d.recorder, + d.dynamicWatches, + kb, + initContainersParameters, + ) if err != nil { return nil, err } - kibanaPodSpec := pod.NewPodTemplateSpec(*kb, volumes, initContainers) + kibanaPodSpec := pod.NewPodTemplateSpec(*kb, keystoreResources) // Build a checksum of the configuration, which we can use to cause the Deployment to roll Kibana // instances in case of any change in the CA file, secure settings or credentials contents. // This is done because Kibana does not support updating those without restarting the process. 
configChecksum := sha256.New224() - configChecksum.Write([]byte(secureSettingsVersion)) + if keystoreResources != nil { + configChecksum.Write([]byte(keystoreResources.Version)) + } // we need to deref the secret here (if any) to include it in the checksum otherwise Kibana will not be rolled on contents changes if kb.Spec.Elasticsearch.Auth.SecretKeyRef != nil { @@ -178,7 +196,7 @@ func (d *driver) Reconcile( ) *reconciler.Results { results := reconciler.Results{} if !kb.Spec.Elasticsearch.IsConfigured() { - log.Info("Aborting Kibana deployment reconciliation as no Elasticsearch backend is configured") + log.Info("Aborting Kibana deployment reconciliation as no Elasticsearch backend is configured", "namespace", kb.Namespace, "kibana_name", kb.Name) return &results } diff --git a/operators/pkg/controller/kibana/driver_test.go b/operators/pkg/controller/kibana/driver_test.go index 279fb11f6e..a803ad9593 100644 --- a/operators/pkg/controller/kibana/driver_test.go +++ b/operators/pkg/controller/kibana/driver_test.go @@ -145,7 +145,7 @@ func Test_driver_deploymentParams(t *testing.T) { NodeCount: 1, Elasticsearch: kbtype.BackendElasticsearch{ URL: "https://localhost:9200", - Auth: kbtype.ElasticsearchAuth{ + Auth: v1alpha1.ElasticsearchAuth{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: "test-auth", diff --git a/operators/pkg/controller/kibana/kibana_controller.go b/operators/pkg/controller/kibana/kibana_controller.go index 75a316a0b4..64d851d6cc 100644 --- a/operators/pkg/controller/kibana/kibana_controller.go +++ b/operators/pkg/controller/kibana/kibana_controller.go @@ -11,12 +11,12 @@ import ( kibanav1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/events" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/finalizer" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/operator" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/version" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/securesettings" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -129,9 +129,9 @@ func (r *ReconcileKibana) Reconcile(request reconcile.Request) (reconcile.Result // atomically update the iteration to support concurrent runs. currentIteration := atomic.AddInt64(&r.iteration, 1) iterationStartTime := time.Now() - log.Info("Start reconcile iteration", "iteration", currentIteration) + log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "kibana_name", request.Name) defer func() { - log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime)) + log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "kibana_name", request.Name) }() // Fetch the Kibana instance @@ -148,13 +148,14 @@ func (r *ReconcileKibana) Reconcile(request reconcile.Request) (reconcile.Result } if common.IsPaused(kb.ObjectMeta) { - log.Info("Paused : skipping reconciliation", "iteration", currentIteration) + log.Info("Object is paused. 
Skipping reconciliation", "namespace", kb.Namespace, "kibana_name", kb.Name, "iteration", currentIteration) return common.PauseRequeue, nil } if err := r.finalizers.Handle(kb, r.finalizersFor(*kb)...); err != nil { if errors.IsConflict(err) { - log.V(1).Info("Conflict while handling secret watch finalizer") + // Conflicts are expected and should be resolved on next loop + log.V(1).Info("Conflict while handling secret watch finalizer", "namespace", kb.Namespace, "kibana_name", kb.Name) return reconcile.Result{Requeue: true}, nil } return reconcile.Result{}, err @@ -170,6 +171,7 @@ func (r *ReconcileKibana) Reconcile(request reconcile.Request) (reconcile.Result return reconcile.Result{}, err } state := NewState(request, kb) + state.UpdateKibanaControllerVersion(r.params.OperatorInfo.BuildInfo.Version) driver, err := newDriver(r, r.scheme, *ver, r.dynamicWatches, r.recorder) if err != nil { return reconcile.Result{}, err @@ -179,7 +181,7 @@ func (r *ReconcileKibana) Reconcile(request reconcile.Request) (reconcile.Result // update status err = r.updateStatus(state) if err != nil && errors.IsConflict(err) { - log.V(1).Info("Conflict while updating status") + log.V(1).Info("Conflict while updating status", "namespace", kb.Namespace, "kibana_name", kb.Name) return reconcile.Result{Requeue: true}, nil } return results.WithError(err).Aggregate() @@ -193,7 +195,7 @@ func (r *ReconcileKibana) updateStatus(state State) error { if state.Kibana.Status.IsDegraded(current.Status) { r.recorder.Event(current, corev1.EventTypeWarning, events.EventReasonUnhealthy, "Kibana health degraded") } - log.Info("Updating status", "iteration", atomic.LoadInt64(&r.iteration)) + log.Info("Updating status", "iteration", atomic.LoadInt64(&r.iteration), "namespace", state.Kibana.Namespace, "kibana_name", state.Kibana.Name) return r.Status().Update(state.Kibana) } @@ -201,6 +203,6 @@ func (r *ReconcileKibana) updateStatus(state State) error { func (r *ReconcileKibana) finalizersFor(kb kibanav1alpha1.Kibana) []finalizer.Finalizer { return []finalizer.Finalizer{ secretWatchFinalizer(kb, r.dynamicWatches), - securesettings.Finalizer(k8s.ExtractNamespacedName(&kb), r.dynamicWatches), + keystore.Finalizer(k8s.ExtractNamespacedName(&kb), r.dynamicWatches, &kb), } } diff --git a/operators/pkg/controller/kibana/name/name.go b/operators/pkg/controller/kibana/name/name.go index cd31b926a9..35d0be62e0 100644 --- a/operators/pkg/controller/kibana/name/name.go +++ b/operators/pkg/controller/kibana/name/name.go @@ -16,8 +16,7 @@ const ( // this leaves 63 - 36 = 27 characters for a suffix. MaxSuffixLength = MaxLabelLength - MaxElasticsearchNameLength - httpServiceSuffix = "http" - secureSettingsSecretSuffix = "secure-settings" + httpServiceSuffix = "http" ) // KBNamer is a Namer that is configured with the defaults for resources related to a Kibana resource. 
@@ -33,7 +32,3 @@ func HTTPService(kbName string) string { func Deployment(kbName string) string { return KBNamer.Suffix(kbName) } - -func SecureSettingsSecret(kbName string) string { - return KBNamer.Suffix(kbName, secureSettingsSecretSuffix) -} diff --git a/operators/pkg/controller/kibana/pod/pod.go b/operators/pkg/controller/kibana/pod/pod.go index 6991d14d8b..ce9900a039 100644 --- a/operators/pkg/controller/kibana/pod/pod.go +++ b/operators/pkg/controller/kibana/pod/pod.go @@ -6,6 +6,7 @@ package pod import ( "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/defaults" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/label" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/volume" @@ -52,17 +53,22 @@ func imageWithVersion(image string, version string) string { return stringsutil.Concat(image, ":", version) } -func NewPodTemplateSpec(kb v1alpha1.Kibana, additionalVolumes []corev1.Volume, initContainers []corev1.Container) corev1.PodTemplateSpec { - return defaults.NewPodTemplateBuilder(kb.Spec.PodTemplate, v1alpha1.KibanaContainerName). +func NewPodTemplateSpec(kb v1alpha1.Kibana, keystore *keystore.Resources) corev1.PodTemplateSpec { + builder := defaults.NewPodTemplateBuilder(kb.Spec.PodTemplate, v1alpha1.KibanaContainerName). WithLabels(label.NewLabels(kb.Name)). WithDockerImage(kb.Spec.Image, imageWithVersion(defaultImageRepositoryAndName, kb.Spec.Version)). WithReadinessProbe(readinessProbe(kb.Spec.HTTP.TLS.Enabled())). WithPorts(ports). - WithVolumes(append(additionalVolumes, volume.KibanaDataVolume.Volume())...). - WithVolumeMounts(volume.KibanaDataVolume.VolumeMount()). - WithInitContainers(initContainers...). - WithInitContainerDefaults(). - PodTemplate + WithVolumes(volume.KibanaDataVolume.Volume()). + WithVolumeMounts(volume.KibanaDataVolume.VolumeMount()) + + if keystore != nil { + builder.WithVolumes(keystore.Volume). + WithInitContainers(keystore.InitContainer). + WithInitContainerDefaults() + } + + return builder.PodTemplate } // GetKibanaContainer returns the Kibana container from the given podSpec. 
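The pod.go change above replaces the separate additionalVolumes and initContainers parameters with a single optional *keystore.Resources. A minimal usage sketch, assuming the struct fields shown in the tests that follow (the container and volume names are illustrative placeholders):

package main

import (
	"fmt"

	"github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1"
	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore"
	"github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/pod"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	kb := v1alpha1.Kibana{Spec: v1alpha1.KibanaSpec{Version: "7.1.0"}}

	// nil keystore: only the Kibana data volume is mounted, no init container.
	plain := pod.NewPodTemplateSpec(kb, nil)

	// non-nil keystore: the builder also wires in the keystore volume, the
	// init container and the init container defaults.
	withKeystore := pod.NewPodTemplateSpec(kb, &keystore.Resources{
		InitContainer: corev1.Container{Name: "init-keystore"},
		Volume:        corev1.Volume{Name: "secure-settings"},
	})

	// The tests below assert 1 volume without a keystore and 2 with one.
	fmt.Println(len(plain.Spec.Volumes), len(withKeystore.Spec.Volumes))
}
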
diff --git a/operators/pkg/controller/kibana/pod/pod_test.go b/operators/pkg/controller/kibana/pod/pod_test.go index 3e6361baa8..e8bde21630 100644 --- a/operators/pkg/controller/kibana/pod/pod_test.go +++ b/operators/pkg/controller/kibana/pod/pod_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/association/keystore" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/kibana/label" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -45,11 +45,10 @@ func Test_imageWithVersion(t *testing.T) { func TestNewPodTemplateSpec(t *testing.T) { tests := []struct { - name string - kb v1alpha1.Kibana - additionalVolumes []corev1.Volume - initContainers []corev1.Container - assertions func(pod corev1.PodTemplateSpec) + name string + kb v1alpha1.Kibana + keystore *keystore.Resources + assertions func(pod corev1.PodTemplateSpec) }{ { name: "defaults", @@ -58,6 +57,7 @@ func TestNewPodTemplateSpec(t *testing.T) { Version: "7.1.0", }, }, + keystore: nil, assertions: func(pod corev1.PodTemplateSpec) { assert.Equal(t, false, *pod.Spec.AutomountServiceAccountToken) assert.Len(t, pod.Spec.Containers, 1) @@ -72,14 +72,16 @@ func TestNewPodTemplateSpec(t *testing.T) { }, }, { - name: "with additional volumes and init containers", + name: "with additional volumes and init containers for the Keystore", kb: v1alpha1.Kibana{ Spec: v1alpha1.KibanaSpec{ Version: "7.1.0", }, }, - additionalVolumes: []corev1.Volume{{Name: "vol"}}, - initContainers: []corev1.Container{{Name: "init"}}, + keystore: &keystore.Resources{ + InitContainer: corev1.Container{Name: "init"}, + Volume: corev1.Volume{Name: "vol"}, + }, assertions: func(pod corev1.PodTemplateSpec) { assert.Len(t, pod.Spec.InitContainers, 1) assert.Len(t, pod.Spec.Volumes, 2) @@ -91,6 +93,7 @@ func TestNewPodTemplateSpec(t *testing.T) { Image: "my-custom-image:1.0.0", Version: "7.1.0", }}, + keystore: nil, assertions: func(pod corev1.PodTemplateSpec) { assert.Equal(t, "my-custom-image:1.0.0", GetKibanaContainer(pod.Spec).Image) }, @@ -108,12 +111,14 @@ func TestNewPodTemplateSpec(t *testing.T) { }, }, }}, + keystore: nil, assertions: func(pod corev1.PodTemplateSpec) { assert.Len(t, pod.Spec.InitContainers, 1) }, }, { - name: "with user-provided labels", + name: "with user-provided labels", + keystore: nil, kb: v1alpha1.Kibana{ ObjectMeta: metav1.ObjectMeta{ Name: "kibana-name", @@ -191,7 +196,7 @@ func TestNewPodTemplateSpec(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := NewPodTemplateSpec(tt.kb, tt.additionalVolumes, tt.initContainers) + got := NewPodTemplateSpec(tt.kb, tt.keystore) tt.assertions(got) }) } diff --git a/operators/pkg/controller/kibana/securesettings/securesettings.go b/operators/pkg/controller/kibana/securesettings/securesettings.go deleted file mode 100644 index 5ff2d980f0..0000000000 --- a/operators/pkg/controller/kibana/securesettings/securesettings.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package securesettings - -import ( - "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" - "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" -) - -var log = logf.Log.WithName("secure-settings") - -// Resources optionally returns a volume and init container to include in Kibana pods, -// in order to create a Keystore from secure settings referenced in the Kibana spec. -func Resources( - c k8s.Client, - recorder record.EventRecorder, - watches watches.DynamicWatches, - kb v1alpha1.Kibana, -) ([]corev1.Volume, []corev1.Container, string, error) { - // setup a volume from the user-provided secure settings secret - secretVolume, version, err := secureSettingsVolume(c, recorder, watches, kb) - if err != nil { - return nil, nil, "", err - } - if secretVolume == nil { - // nothing to do - return nil, nil, "", nil - } - - // build an init container to create Kibana keystore from the secure settings volume - initContainer := initContainer(*secretVolume) - - return []corev1.Volume{secretVolume.Volume()}, []corev1.Container{initContainer}, version, nil -} diff --git a/operators/pkg/controller/kibana/securesettings/securesettings_test.go b/operators/pkg/controller/kibana/securesettings/securesettings_test.go deleted file mode 100644 index a391d7d61e..0000000000 --- a/operators/pkg/controller/kibana/securesettings/securesettings_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package securesettings - -import ( - "reflect" - "testing" - - commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" - watches2 "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/watches" - "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -var ( - testSecureSettingsSecretName = "secure-settings-secret" - testSecureSettingsSecret = corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "namespace", - Name: testSecureSettingsSecretName, - ResourceVersion: "resource-version", - }, - } - testSecureSettingsSecretRef = commonv1alpha1.SecretRef{ - SecretName: testSecureSettingsSecretName, - } - testKibana = v1alpha1.Kibana{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "namespace", - Name: "kibana", - }, - } - testKibanaWithSecureSettings = v1alpha1.Kibana{ - ObjectMeta: testKibana.ObjectMeta, - Spec: v1alpha1.KibanaSpec{ - SecureSettings: &testSecureSettingsSecretRef, - }, - } -) - -func TestResources(t *testing.T) { - tests := []struct { - name string - client k8s.Client - kb v1alpha1.Kibana - wantVolumes int - wantContainers int - wantVersion string - }{ - { - name: "no secure settings specified: no resources", - client: k8s.WrapClient(fake.NewFakeClient()), - kb: v1alpha1.Kibana{}, - wantVolumes: 0, - wantContainers: 0, - wantVersion: "", - }, - { - name: "secure settings specified: return volume, init container and version", - client: k8s.WrapClient(fake.NewFakeClient(&testSecureSettingsSecret)), - kb: testKibanaWithSecureSettings, - wantVolumes: 1, - wantContainers: 1, - wantVersion: testSecureSettingsSecret.ResourceVersion, - }, - { - name: "secure settings specified but secret not there: no resources", - client: k8s.WrapClient(fake.NewFakeClient()), - kb: testKibanaWithSecureSettings, - wantVolumes: 0, - wantContainers: 0, - wantVersion: "", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - recorder := record.NewFakeRecorder(1000) - watches := watches2.NewDynamicWatches() - require.NoError(t, watches.InjectScheme(scheme.Scheme)) - wantVolumes, wantContainers, wantVersion, err := Resources(tt.client, recorder, watches, tt.kb) - require.NoError(t, err) - if !reflect.DeepEqual(len(wantVolumes), tt.wantVolumes) { - t.Errorf("Resources() got = %v, want %v", wantVolumes, tt.wantVolumes) - } - if !reflect.DeepEqual(len(wantContainers), tt.wantContainers) { - t.Errorf("Resources() got1 = %v, want %v", wantContainers, tt.wantContainers) - } - if wantVersion != tt.wantVersion { - t.Errorf("Resources() got2 = %v, want %v", wantVersion, tt.wantVersion) - } - }) - } -} diff --git a/operators/pkg/controller/kibana/state.go b/operators/pkg/controller/kibana/state.go index 072628d0a1..f0d66fc722 100644 --- a/operators/pkg/controller/kibana/state.go +++ b/operators/pkg/controller/kibana/state.go @@ -36,3 +36,13 @@ func (s State) UpdateKibanaState(deployment v1.Deployment) { } } } + +// UpdateKibanaControllerVersion updates the Kibana status with the controller version that last updated the Kibana instance +func (s *State) UpdateKibanaControllerVersion(version string) { + s.Kibana.Status.ControllerVersion = version +} + +// GetKibanaControllerVersion returns the controller version that last updated the 
Kibana instance +func (s *State) GetKibanaControllerVersion() string { + return s.Kibana.Status.ControllerVersion +}
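A short sketch of how these status helpers are meant to be used during reconciliation (the surrounding reconciler code and the currentControllerVersion value are assumptions; only the two State methods come from this diff):

    // Record which operator version last reconciled this Kibana resource,
    // so a later reconciliation can detect that an older controller wrote
    // the current status and apply any upgrade-specific handling.
    if state.GetKibanaControllerVersion() != currentControllerVersion {
        state.UpdateKibanaControllerVersion(currentControllerVersion)
        // the updated status is expected to be persisted by the caller
    }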
Continuing.", "namespace", kibana.Namespace, "kibana_name", kibana.Name) } if kibana.Spec.ElasticsearchRef.Name == "" { @@ -230,7 +232,7 @@ func (r *ReconcileAssociation) reconcileInternal(kibana kbtype.Kibana) (commonv1 // remove connection details if they are set if (kibana.Spec.Elasticsearch != kbtype.BackendElasticsearch{}) { kibana.Spec.Elasticsearch = kbtype.BackendElasticsearch{} - log.Info("Removing Elasticsearch configuration from managed association", "kibana", kibana.Name) + log.Info("Removing Elasticsearch configuration from managed association", "namespace", kibana.Namespace, "kibana_name", kibana.Name) if err := r.Update(&kibana); err != nil { return commonv1alpha1.AssociationPending, err } @@ -257,7 +259,7 @@ func (r *ReconcileAssociation) reconcileInternal(kibana kbtype.Kibana) (commonv1 if !reflect.DeepEqual(kibana.Spec.Elasticsearch, expectedEsConfig) { kibana.Spec.Elasticsearch = expectedEsConfig - log.Info("Updating Kibana spec with Elasticsearch backend configuration") + log.Info("Updating Kibana spec with Elasticsearch backend configuration", "namespace", kibana.Namespace, "kibana_name", kibana.Name) if err := r.Update(&kibana); err != nil { return commonv1alpha1.AssociationPending, err } @@ -288,14 +290,14 @@ func deleteOrphanedResources(c k8s.Client, kibana kbtype.Kibana) error { if !kibana.Spec.ElasticsearchRef.IsDefined() { // look for association secrets owned by this kibana instance // which should not exist since no ES referenced in the spec - log.Info("Deleting", "secret", k8s.ExtractNamespacedName(&s)) + log.Info("Deleting secret", "namespace", s.Namespace, "secret_name", s.Name, "kibana_name", kibana.Name) if err := c.Delete(&s); err != nil && !apierrors.IsNotFound(err) { return err } } else if value, ok := s.Labels[common.TypeLabelName]; ok && value == user.UserType && esRefNamespace != s.Namespace { // User secret may live in an other namespace, check if it has changed - log.Info("Deleting", "secret", k8s.ExtractNamespacedName(&s)) + log.Info("Deleting secret", "namespace", s.Namespace, "secretname", s.Name, "kibana_name", kibana.Name) if err := c.Delete(&s); err != nil && !apierrors.IsNotFound(err) { return err } diff --git a/operators/pkg/controller/license/license_controller.go b/operators/pkg/controller/license/license_controller.go index b1d28369bb..096a06e342 100644 --- a/operators/pkg/controller/license/license_controller.go +++ b/operators/pkg/controller/license/license_controller.go @@ -51,16 +51,16 @@ func (r *ReconcileLicenses) Reconcile(request reconcile.Request) (reconcile.Resu // atomically update the iteration to support concurrent runs. 
currentIteration := atomic.AddInt64(&r.iteration, 1) iterationStartTime := time.Now() - log.Info("Start reconcile iteration", "iteration", currentIteration, "request", request) + log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "es_name", request.Name) defer func() { - log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime)) + log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "es_name", request.Name) }() result, err := r.reconcileInternal(request) if result.Requeue { - log.Info("Re-queuing new license check immediately (rate-limited)", "cluster", request.NamespacedName) + log.Info("Re-queuing new license check immediately (rate-limited)", "namespace", request.Namespace, "es_name", request.Name) } if result.RequeueAfter > 0 { - log.Info("Re-queuing new license check", "cluster", request.NamespacedName, "RequeueAfter", result.RequeueAfter) + log.Info("Re-queuing new license check", "namespace", request.Namespace, "es_name", request.Name, "RequeueAfter", result.RequeueAfter) } return result, err } diff --git a/operators/pkg/controller/license/trial/trial_controller.go b/operators/pkg/controller/license/trial/trial_controller.go index 5eaa8b067c..854949af5f 100644 --- a/operators/pkg/controller/license/trial/trial_controller.go +++ b/operators/pkg/controller/license/trial/trial_controller.go @@ -56,9 +56,9 @@ func (r *ReconcileTrials) Reconcile(request reconcile.Request) (reconcile.Result // atomically update the iteration to support concurrent runs. currentIteration := atomic.AddInt64(&r.iteration, 1) iterationStartTime := time.Now() - log.Info("Start reconcile iteration", "iteration", currentIteration, "request", request) + log.Info("Start reconcile iteration", "iteration", currentIteration, "namespace", request.Namespace, "secret_name", request.Name) defer func() { - log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime)) + log.Info("End reconcile iteration", "iteration", currentIteration, "took", time.Since(iterationStartTime), "namespace", request.Namespace, "secret_name", request.Name) }() secret, license, err := licensing.TrialLicense(r, request.NamespacedName) diff --git a/operators/pkg/dev/portforward/pod_forwarder.go b/operators/pkg/dev/portforward/pod_forwarder.go index 26d8d0f2fa..71436e3986 100644 --- a/operators/pkg/dev/portforward/pod_forwarder.go +++ b/operators/pkg/dev/portforward/pod_forwarder.go @@ -201,7 +201,7 @@ func (f *podForwarder) Run(ctx context.Context) error { defer runCtxCancel() if f.clientset != nil { - log.Info("Watching pod for changes", "pod", f.podNSN) + log.Info("Watching pod for changes", "namespace", f.podNSN.Namespace, "pod_name", f.podNSN.Name) w, err := f.clientset.CoreV1().Pods(f.podNSN.Namespace).Watch(metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", f.podNSN.Name).String(), }) @@ -217,7 +217,8 @@ func (f *podForwarder) Run(ctx context.Context) error { if evt.Type == watch.Deleted || evt.Type == watch.Error || evt.Type == "" { log.Info( "Pod is deleted or watch failed/closed, closing pod forwarder", - "pod", f.podNSN, + "namespace", f.podNSN.Namespace, + "pod_name", f.podNSN.Name, ) runCtxCancel() return diff --git a/operators/test/e2e/apm/configuration_test.go b/operators/test/e2e/apm/configuration_test.go new file mode 100644 index 0000000000..8e7c4e021b --- /dev/null +++ 
b/operators/test/e2e/apm/configuration_test.go @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package apm + +import ( + "fmt" + "testing" + + apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" + commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" + "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/operators/test/e2e/test" + "github.com/elastic/cloud-on-k8s/operators/test/e2e/test/apmserver" + "github.com/elastic/cloud-on-k8s/operators/test/e2e/test/elasticsearch" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + yaml "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + APMKeystoreBin = "/usr/share/apm-server/apm-server" + APMKeystoreOption = "keystore" +) + +var APMKeystoreCmd = []string{APMKeystoreBin, APMKeystoreOption} + +type PartialApmConfiguration struct { + Output struct { + Elasticsearch struct { + CompressionLevel int `yaml:"compression_level"` + } `yaml:"elasticsearch"` + } `yaml:"output"` +} + +func TestUpdateConfiguration(t *testing.T) { + + // user-provided secure settings secret + secureSettingsSecretName := "secure-settings-secret" + secureSettings := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secureSettingsSecretName, + Namespace: test.Namespace, + }, + Data: map[string][]byte{ + "logging.verbose": []byte("true"), + }, + } + + name := "test-apm-configuration" + esBuilder := elasticsearch.NewBuilder(name). + WithESMasterDataNodes(1, elasticsearch.DefaultResources) + apmBuilder := apmserver.NewBuilder(name). + WithNamespace(test.Namespace). + WithVersion(test.ElasticStackVersion). 
+ WithRestrictedSecurityContext() + + var previousPodUID *types.UID + + initStepsFn := func(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Create secure settings secret", + Test: func(t *testing.T) { + // remove if already exists (ignoring errors) + _ = k.Client.Delete(&secureSettings) + // and create a fresh one + err := k.Client.Create(&secureSettings) + require.NoError(t, err) + }, + }, + // Keystore should be empty + test.CheckKeystoreEntries(k, test.ApmServerPodListOptions(name), APMKeystoreCmd, nil), + } + } + apmNamespacedName := types.NamespacedName{ + Name: name, + Namespace: test.Namespace, + } + + stepsFn := func(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Check the value of a parameter in the configuration", + Test: func(t *testing.T) { + config, err := partialAPMConfiguration(k, name) + require.NoError(t, err) + require.Equal(t, config.Output.Elasticsearch.CompressionLevel, 5) // 5 is the expected default value + }, + }, + test.Step{ + Name: "Add a Keystore to the APM server", + Test: func(t *testing.T) { + // get current pod id + pods, err := k.GetPods(test.ApmServerPodListOptions(name)) + require.NoError(t, err) + require.True(t, len(pods) == 1) + previousPodUID = &pods[0].UID + + var apm apmtype.ApmServer + require.NoError(t, k.Client.Get(apmNamespacedName, &apm)) + apm.Spec.SecureSettings = &v1alpha1.SecretRef{ + SecretName: secureSettingsSecretName, + } + require.NoError(t, k.Client.Update(&apm)) + }, + }, + test.Step{ + Name: "APM Pod should be recreated", + Test: test.Eventually(func() error { + // get current pod id + pods, err := k.GetPods(test.ApmServerPodListOptions(name)) + if err != nil { + return err + } + if len(pods) != 1 { + return fmt.Errorf("1 APM pod expected, got %d", len(pods)) + } + if pods[0].UID == *previousPodUID { + return fmt.Errorf("APM pod is still the same, uid: %s", pods[0].UID) + } + return nil + }), + }, + + test.CheckKeystoreEntries(k, test.ApmServerPodListOptions(name), APMKeystoreCmd, []string{"logging.verbose"}), + + test.Step{ + Name: "Customize configuration of the APM server", + Test: func(t *testing.T) { + // get current pod id + pods, err := k.GetPods(test.ApmServerPodListOptions(name)) + require.NoError(t, err) + require.True(t, len(pods) == 1) + previousPodUID = &pods[0].UID + + var apm apmtype.ApmServer + require.NoError(t, k.Client.Get(apmNamespacedName, &apm)) + customConfig := commonv1alpha1.Config{ + Data: map[string]interface{}{"output.elasticsearch.compression_level": 1}, + } + apm.Spec.Config = &customConfig + require.NoError(t, k.Client.Update(&apm)) + }, + }, + test.Step{ + Name: "APM Pod should be recreated", + Test: test.Eventually(func() error { + // get current pod id + pods, err := k.GetPods(test.ApmServerPodListOptions(name)) + if err != nil { + return err + } + if len(pods) != 1 { + return fmt.Errorf("1 APM pod expected, got %d", len(pods)) + } + if pods[0].UID == *previousPodUID { + return fmt.Errorf("APM pod is still the same, uid: %s", pods[0].UID) + } + return nil + }), + }, + + test.Step{ + Name: "Check the value of a parameter in the configuration", + Test: func(t *testing.T) { + config, err := partialAPMConfiguration(k, name) + require.NoError(t, err) + require.Equal(t, config.Output.Elasticsearch.CompressionLevel, 1) // value should be updated to 1 + }, + }, + + // cleanup extra resources + test.Step{ + Name: "Delete secure settings secret", + Test: func(t *testing.T) { + err := k.Client.Delete(&secureSettings) + require.NoError(t, err) + }, + }, + } + } + 
+ test.Sequence(initStepsFn, stepsFn, esBuilder, apmBuilder).RunSequential(t) + +} + +func partialAPMConfiguration(k *test.K8sClient, name string) (PartialApmConfiguration, error) { + var config PartialApmConfiguration + // list the APM Server pods + pods, err := k.GetPods(test.ApmServerPodListOptions(name)) + if err != nil { + return config, err + } + // exec into the pod to read the APM Server configuration file + stdout, stderr, err := k.Exec(k8s.ExtractNamespacedName(&pods[0]), []string{"cat", "/usr/share/apm-server/config/config-secret/apm-server.yml"}) + if err != nil { + return config, errors.Wrap(err, fmt.Sprintf("stdout:\n%s\nstderr:\n%s", stdout, stderr)) + } + err = yaml.Unmarshal([]byte(stdout), &config) + if err != nil { + return config, err + } + return config, nil +}
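PartialApmConfiguration deliberately mirrors only one path of apm-server.yml, so decoding ignores every other setting in the file. A standalone sketch of the decoding (the inline YAML document is illustrative, not a complete apm-server.yml):

    // Decodes only output.elasticsearch.compression_level; all other
    // keys present in the document are simply dropped by yaml.v2.
    raw := []byte("output:\n  elasticsearch:\n    compression_level: 5\n")
    var config PartialApmConfiguration
    if err := yaml.Unmarshal(raw, &config); err != nil {
        panic(err) // example only
    }
    fmt.Println(config.Output.Elasticsearch.CompressionLevel) // prints 5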
diff --git a/operators/test/e2e/kb/keystore_test.go b/operators/test/e2e/kb/keystore_test.go index df0d054c75..27096bb84e 100644 --- a/operators/test/e2e/kb/keystore_test.go +++ b/operators/test/e2e/kb/keystore_test.go @@ -17,6 +17,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + KibanaKeystoreBin = "/usr/share/kibana/bin/kibana-keystore" +) + +var KibanaKeystoreCmd = []string{KibanaKeystoreBin} + func TestUpdateKibanaSecureSettings(t *testing.T) { // user-provided secure settings secret secureSettingsSecretName := "secure-settings-secret" @@ -55,7 +61,7 @@ func TestUpdateKibanaSecureSettings(t *testing.T) { } stepsFn := func(k *test.K8sClient) test.StepList { return test.StepList{ - kibana.CheckKibanaKeystoreEntries(k, kbBuilder.Kibana, []string{"logging.verbose"}), + test.CheckKeystoreEntries(k, test.KibanaPodListOptions(name), KibanaKeystoreCmd, []string{"logging.verbose"}), // modify the secure settings secret test.Step{ Name: "Modify secure settings secret", @@ -71,7 +77,7 @@ func TestUpdateKibanaSecureSettings(t *testing.T) { }, // keystore should be updated accordingly - kibana.CheckKibanaKeystoreEntries(k, kbBuilder.Kibana, []string{"logging.json", "logging.verbose"}), + test.CheckKeystoreEntries(k, test.KibanaPodListOptions(name), KibanaKeystoreCmd, []string{"logging.json", "logging.verbose"}), // remove the secure settings reference test.Step{ @@ -89,7 +95,7 @@ func TestUpdateKibanaSecureSettings(t *testing.T) { }, // keystore should be updated accordingly - kibana.CheckKibanaKeystoreEntries(k, kbBuilder.Kibana, nil), + test.CheckKeystoreEntries(k, test.KibanaPodListOptions(name), KibanaKeystoreCmd, nil), // cleanup extra resources test.Step{ diff --git a/operators/test/e2e/test/apmserver/builder.go b/operators/test/e2e/test/apmserver/builder.go index 072697744f..5594d98413 100644 --- a/operators/test/e2e/test/apmserver/builder.go +++ b/operators/test/e2e/test/apmserver/builder.go @@ -7,7 +7,10 @@ package apmserver import ( apmtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/apm/v1alpha1" common "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" + commonv1alpha1 "github.com/elastic/cloud-on-k8s/operators/pkg/apis/common/v1alpha1" "github.com/elastic/cloud-on-k8s/operators/test/e2e/test" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -16,6 +19,35 @@ type Builder struct { ApmServer apmtype.ApmServer } +func NewBuilder(name string) Builder { + meta := metav1.ObjectMeta{ + Name: name, + Namespace: test.Namespace, + } + return Builder{ + ApmServer: apmtype.ApmServer{ + ObjectMeta: meta, + Spec: apmtype.ApmServerSpec{ + NodeCount: 1, + Version: test.ElasticStackVersion, + Output: apmtype.Output{ + Elasticsearch: apmtype.ElasticsearchOutput{ + ElasticsearchRef: &commonv1alpha1.ObjectSelector{ + Name: name, + Namespace: test.Namespace, + }, + }, + }, + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + SecurityContext: test.DefaultSecurityContext(), + }, + }, + }, + }, + } +} + func (b Builder) WithRestrictedSecurityContext() Builder { b.ApmServer.Spec.PodTemplate.Spec.SecurityContext = test.DefaultSecurityContext() return b diff --git a/operators/test/e2e/test/apmserver/checks_k8s.go b/operators/test/e2e/test/apmserver/checks_k8s.go index d26c97f8a4..5b60eb0faa 100644 --- a/operators/test/e2e/test/apmserver/checks_k8s.go +++ b/operators/test/e2e/test/apmserver/checks_k8s.go @@ -83,7 +83,7 @@ func CheckServices(b Builder, k *test.K8sClient) test.Step { Name: "ApmServer services should be created", Test: test.Eventually(func() error { for _, s := range []string{ - b.ApmServer.Name + "-apm-server", + b.ApmServer.Name + "-apm-http", } { if _, err := k.GetService(s); err != nil { return err @@ -100,7 +100,7 @@ func CheckServicesEndpoints(b Builder, k *test.K8sClient) test.Step { Name: "ApmServer services should have endpoints", Test: test.Eventually(func() error { for endpointName, addrCount := range map[string]int{ - b.ApmServer.Name + "-apm-server": int(b.ApmServer.Spec.NodeCount), + b.ApmServer.Name + "-apm-http": int(b.ApmServer.Spec.NodeCount), } { endpoints, err := k.GetEndpoints(endpointName) if err != nil { diff --git a/operators/test/e2e/test/elasticsearch/steps_init.go b/operators/test/e2e/test/elasticsearch/steps_init.go index f2cf9785b9..a7afcd6731 100644 --- a/operators/test/e2e/test/elasticsearch/steps_init.go +++ b/operators/test/e2e/test/elasticsearch/steps_init.go @@ -67,7 +67,7 @@ func (b Builder) InitTestSteps(k *test.K8sClient) test.StepList { return err } if err == nil { - return fmt.Errorf("elasticsearch %s is still there", b.Elasticsearch.Name) + return fmt.Errorf("elasticsearch %s is still there", k8s.ExtractNamespacedName(&b.Elasticsearch)) } return nil })(t) diff --git a/operators/test/e2e/test/k8s_client.go b/operators/test/e2e/test/k8s_client.go index 953728c1f9..c1c1d7d69f 100644 --- a/operators/test/e2e/test/k8s_client.go +++ b/operators/test/e2e/test/k8s_client.go @@ -14,7 +14,7 @@ import ( assoctype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/associations/v1alpha1" estype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/elasticsearch/v1alpha1" kbtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver" + apmlabels "github.com/elastic/cloud-on-k8s/operators/pkg/controller/apmserver/labels" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates" "github.com/elastic/cloud-on-k8s/operators/pkg/controller/common/certificates/http" @@ -324,8 +324,8 @@ func ApmServerPodListOptions(apmName string) client.ListOptions { return client.ListOptions{ Namespace: Namespace, LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{ - common.TypeLabelName: apmlabels.Type, - apmserver.ApmServerNameLabelName: apmName, + common.TypeLabelName: apmlabels.Type, + apmlabels.ApmServerNameLabelName: apmName, }))} } diff --git a/operators/test/e2e/test/kibana/checks_keystore.go b/operators/test/e2e/test/kibana/checks_keystore.go deleted file mode 100644 index b2be9b29a1..0000000000 --- a/operators/test/e2e/test/kibana/checks_keystore.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright Elasticsearch
B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package kibana - -import ( - "fmt" - "reflect" - "strings" - - kbtype "github.com/elastic/cloud-on-k8s/operators/pkg/apis/kibana/v1alpha1" - "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" - "github.com/elastic/cloud-on-k8s/operators/test/e2e/test" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" -) - -const ( - keystoreBin = "/usr/share/kibana/bin/kibana-keystore" -) - -func CheckKibanaKeystoreEntries(k *test.K8sClient, kb kbtype.Kibana, expectedKeys []string) test.Step { - return test.Step{ - Name: "Kibana secure settings should eventually be set in all nodes keystore", - Test: test.Eventually(func() error { - pods, err := k.GetPods(test.KibanaPodListOptions(kb.Name)) - if err != nil { - return err - } - return test.OnAllPods(pods, func(p corev1.Pod) error { - // exec into the pod to list keystore entries - stdout, stderr, err := k.Exec(k8s.ExtractNamespacedName(&p), []string{keystoreBin, "list"}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("stdout:\n%s\nstderr:\n%s", stdout, stderr)) - } - - // parse entries from stdout - var entries []string - // remove trailing newlines and whitespaces - trimmed := strings.TrimSpace(stdout) - // split by lines, unless no output - if trimmed != "" { - entries = strings.Split(trimmed, "\n") - } - - if !reflect.DeepEqual(expectedKeys, entries) { - return fmt.Errorf("invalid keystore entries. Expected: %s. Actual: %s", expectedKeys, entries) - } - return nil - }) - }), - } -} diff --git a/operators/test/e2e/test/params.go b/operators/test/e2e/test/params.go index 6b2bfb79da..2a4b02a124 100644 --- a/operators/test/e2e/test/params.go +++ b/operators/test/e2e/test/params.go @@ -33,5 +33,5 @@ func init() { flag.Parse() logf.SetLogger(logf.ZapLogger(true)) - log.Info("Info", "version", ElasticStackVersion, "ns", Namespace) + log.Info("Info", "version", ElasticStackVersion, "namespace", Namespace) } diff --git a/operators/test/e2e/test/utils.go b/operators/test/e2e/test/utils.go index bbf66cbc20..5b020fcfe9 100644 --- a/operators/test/e2e/test/utils.go +++ b/operators/test/e2e/test/utils.go @@ -7,11 +7,17 @@ package test import ( "fmt" "os" + "reflect" + "strings" "testing" "time" + "github.com/elastic/cloud-on-k8s/operators/pkg/utils/k8s" "github.com/elastic/cloud-on-k8s/operators/pkg/utils/retry" + "github.com/pkg/errors" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -19,6 +25,39 @@ const ( defaultTimeout = 5 * time.Minute ) +func CheckKeystoreEntries(k *K8sClient, listOption client.ListOptions, KeystoreCmd []string, expectedKeys []string) Step { + return Step{ + Name: "secure settings should eventually be set in all nodes keystore", + Test: Eventually(func() error { + pods, err := k.GetPods(listOption) + if err != nil { + return err + } + return OnAllPods(pods, func(p corev1.Pod) error { + // exec into the pod to list keystore entries + stdout, stderr, err := k.Exec(k8s.ExtractNamespacedName(&p), append(KeystoreCmd, "list")) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("stdout:\n%s\nstderr:\n%s", stdout, stderr)) + } + + // parse entries from stdout + var entries []string + // remove trailing newlines and whitespaces + trimmed := strings.TrimSpace(stdout) + // split by lines, unless no output + if trimmed != "" { + entries = 
strings.Split(trimmed, "\n") + } + + if !reflect.DeepEqual(expectedKeys, entries) { + return fmt.Errorf("invalid keystore entries. Expected: %s. Actual: %s", expectedKeys, entries) + } + return nil + }) + }), + } +} + // ExitOnErr exits with code 1 if the given error is not nil func ExitOnErr(err error) { if err != nil {