diff --git a/.ci/jobs/elastic+helm-charts+master+cluster-cleanup.yml b/.ci/jobs/elastic+helm-charts+master+cluster-cleanup.yml
index 1467a2d44..37862f81a 100644
--- a/.ci/jobs/elastic+helm-charts+master+cluster-cleanup.yml
+++ b/.ci/jobs/elastic+helm-charts+master+cluster-cleanup.yml
@@ -21,8 +21,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+master+cluster-creation.yml b/.ci/jobs/elastic+helm-charts+master+cluster-creation.yml
index 0a1bd9dab..c1698c76d 100644
--- a/.ci/jobs/elastic+helm-charts+master+cluster-creation.yml
+++ b/.ci/jobs/elastic+helm-charts+master+cluster-creation.yml
@@ -21,8 +21,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+master+integration-elasticsearch.yml b/.ci/jobs/elastic+helm-charts+master+integration-elasticsearch.yml
index bf3f68450..7bb9ea7f6 100644
--- a/.ci/jobs/elastic+helm-charts+master+integration-elasticsearch.yml
+++ b/.ci/jobs/elastic+helm-charts+master+integration-elasticsearch.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+master+integration-filebeat.yml b/.ci/jobs/elastic+helm-charts+master+integration-filebeat.yml
index 0e45fd1cc..661d5e993 100644
--- a/.ci/jobs/elastic+helm-charts+master+integration-filebeat.yml
+++ b/.ci/jobs/elastic+helm-charts+master+integration-filebeat.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+master+integration-kibana.yml b/.ci/jobs/elastic+helm-charts+master+integration-kibana.yml
index 73ac04282..d689e9143 100644
--- a/.ci/jobs/elastic+helm-charts+master+integration-kibana.yml
+++ b/.ci/jobs/elastic+helm-charts+master+integration-kibana.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+master+integration-metricbeat.yml b/.ci/jobs/elastic+helm-charts+master+integration-metricbeat.yml
index 6e3a4663a..480b700e2 100644
--- a/.ci/jobs/elastic+helm-charts+master+integration-metricbeat.yml
+++ b/.ci/jobs/elastic+helm-charts+master+integration-metricbeat.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+cluster-cleanup.yml b/.ci/jobs/elastic+helm-charts+pull-request+cluster-cleanup.yml
index 39521ca39..cbd0d55eb 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+cluster-cleanup.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+cluster-cleanup.yml
@@ -21,8 +21,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+cluster-creation.yml b/.ci/jobs/elastic+helm-charts+pull-request+cluster-creation.yml
index ad0c27d8b..d1b491080 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+cluster-creation.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+cluster-creation.yml
@@ -21,8 +21,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+integration-elasticsearch.yml b/.ci/jobs/elastic+helm-charts+pull-request+integration-elasticsearch.yml
index 438bc82db..a476a1d85 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+integration-elasticsearch.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+integration-elasticsearch.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+integration-filebeat.yml b/.ci/jobs/elastic+helm-charts+pull-request+integration-filebeat.yml
index fe86038a3..3a1621fbb 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+integration-filebeat.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+integration-filebeat.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+integration-kibana.yml b/.ci/jobs/elastic+helm-charts+pull-request+integration-kibana.yml
index f60f3ac29..ed2e6ec4a 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+integration-kibana.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+integration-kibana.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.ci/jobs/elastic+helm-charts+pull-request+integration-metricbeat.yml b/.ci/jobs/elastic+helm-charts+pull-request+integration-metricbeat.yml
index fe53b9a70..34c6ac2fb 100644
--- a/.ci/jobs/elastic+helm-charts+pull-request+integration-metricbeat.yml
+++ b/.ci/jobs/elastic+helm-charts+pull-request+integration-metricbeat.yml
@@ -25,8 +25,11 @@
#!/usr/local/bin/runbld
set -euo pipefail
+ source /usr/local/bin/bash_standard_lib.sh
+
set +x
- export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ VAULT_TOKEN=$(retry 5 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
+ export VAULT_TOKEN
unset VAULT_ROLE_ID VAULT_SECRET_ID
set -x
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 0f629a421..dc4801407 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -11,10 +11,11 @@ about: Create a report to help us improve
**Helm Version:**
-**Values.yaml:**
+**`helm get release` output:**
+
+e.g. `helm get elasticsearch` (replace `elasticsearch` with the name of your helm release)
```
-key: value
```
**Describe the bug:**
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 27674b795..a5abeea73 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,99 @@
-## Unreleased - XXXX/XX/XX
+# Changelog
-### Metricbeat (WIP)
+## 7.3.0 - 2019/07/31
+* 7.3.0 as the default stack version
+
+### Elasticsearch
+| PR | Author | Title |
+| ------------------------------------------------------- | ---------------------------------------------------------- | ------------------------------------------------------------------------- |
+| [#226](https://github.com/elastic/helm-charts/pull/226) | [@MichaelMarieJulie](https://github.com/MichaelMarieJulie) | Add configurable pods labels |
+| [#237](https://github.com/elastic/helm-charts/pull/237) | [@MichaelSp](https://github.com/MichaelSp) | Add back `service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"` |
+
+### Kibana
+| PR | Author | Title |
+| ------------------------------------------------------- | ------------------------------------------ | ----------------------------------- |
+| [#225](https://github.com/elastic/helm-charts/pull/225) | [@plumcraft](https://github.com/plumcraft) | Add configurable pod labels |
+| [#230](https://github.com/elastic/helm-charts/pull/230) | [@Crazybus](https://github.com/Crazybus) | Add subPath support to secretMounts |
+
+---
+## 7.2.1-0 - 2019/07/18
+
+* [#195](https://github.com/elastic/helm-charts/pull/195) - @cclauss - Initial steps started to move all python2 code to python3
+* [#205](https://github.com/elastic/helm-charts/pull/205) - @Crazybus - Fixup and improve security example documentation
+
+
+### Elasticsearch
+
+* [#171](https://github.com/elastic/helm-charts/pull/171) - @naseemkullah - Run Elasticsearch as a non-root user
+* [#197](https://github.com/elastic/helm-charts/pull/197) - @tetianakravchenko - Add option to provide custom start/stop hooks
+* [#206](https://github.com/elastic/helm-charts/pull/206) - @Crazybus - Automatically detect esMajorVersion for default images
+* [#203](https://github.com/elastic/helm-charts/pull/203) - @Crazybus - Add testing for security context
+* [#220](https://github.com/elastic/helm-charts/pull/220) - @JorisAndrade - Add option to disable sysctlInitContainer
+
+### Kibana
+
+* [#204](https://github.com/elastic/helm-charts/pull/204) - @Crazybus - Make imagePullPolicy actually do something
+* [#210](https://github.com/elastic/helm-charts/pull/210) - @cliedeman - Add Kibana pod annotations
+* [#217](https://github.com/elastic/helm-charts/pull/217) - @Crazybus - Update healthCheckPath to mention basePath usage
+
+### Filebeat
+
+* [#214](https://github.com/elastic/helm-charts/pull/214) - @dugouchet - Add additional labels
+
+### Metricbeat
+
+* [#127](https://github.com/elastic/helm-charts/pull/127) - @Crazybus - Add metricbeat chart
* [#128](https://github.com/elastic/helm-charts/pull/128) - @Crazybus - Add ci jobs for metricbeat
-* [#127](https://github.com/elastic/helm-charts/pull/127) - @Crazybus - WIP add metricbeat chart
+
+---
+## 7.2.0 - 2019/07/01
+
+* 7.2.0 as the default stack version
+* Updated the beta status messaging and added proper descriptions to each chart [#158](https://github.com/elastic/helm-charts/pull/158)
+* Add GKE 1.13 to automated testing suite [#169](https://github.com/elastic/helm-charts/pull/169) and [#181](https://github.com/elastic/helm-charts/pull/181)
+
+### Elasticsearch
+
+* [#123](https://github.com/elastic/helm-charts/pull/123) - @kimxogus - Make the service configurable
+* [#141](https://github.com/elastic/helm-charts/pull/141) - @satchpx - Add capability to specify alternate scheduler
+* [#161](https://github.com/elastic/helm-charts/pull/161) - @Crazybus - Add configurable nodePort to the service spec
+* [#170](https://github.com/elastic/helm-charts/pull/170) - @Crazybus - Update security example docs to match reality
+* [#182](https://github.com/elastic/helm-charts/pull/182) - @hxquangnhat - Fix secretName field for secretMounts
+* [#186](https://github.com/elastic/helm-charts/pull/186) - @Crazybus - Fix pvc annotations with multiple fields
+* [#189](https://github.com/elastic/helm-charts/pull/189) - @gnatpat - Add resources to sidecar container
+
+
+### Kibana
+
+* [#160](https://github.com/elastic/helm-charts/pull/160) - @Crazybus - Add configurable nodePort to the service spec
+* [#168](https://github.com/elastic/helm-charts/pull/168) - @Crazybus - Always set server.host to the docker default
+* [#172](https://github.com/elastic/helm-charts/pull/172) - @naseemkullah - Run Kibana as the non-root kibana user (1000)
+* [#182](https://github.com/elastic/helm-charts/pull/182) - @hxquangnhat - Fix secretName field for secretMounts
+* [#184](https://github.com/elastic/helm-charts/pull/184) - @diegofernandes - Fix wildcard support for ingress
+
+### Filebeat
+
+* [#182](https://github.com/elastic/helm-charts/pull/182) - @hxquangnhat - Fix secretName field for secretMounts
+* [#188](https://github.com/elastic/helm-charts/pull/188) - @cclauss - Fix octal literal to work in both Python 2 and Python 3
+
+---
+## 7.1.1 - 2019/06/07
+
+* 7.1.1 as the default stack version
+* Helm 2.14.0 as the tested version. Helm 2.14.0 has some extra validation built in which caused an issue with an [invalid field in the filebeat chart](https://github.com/elastic/helm-charts/issues/136).
+
+### Elasticsearch
+
+* [#146](https://github.com/elastic/helm-charts/pull/146) - @Crazybus - Add instructions for how to enable snapshots
+
+### Kibana
+
+* [#151](https://github.com/elastic/helm-charts/pull/151) - @natebwangsut - Added an option to add annotation(s) to service resource
+
+### Filebeat
+
+* [#140](https://github.com/elastic/helm-charts/pull/140) - @Crazybus - Remove fsGroup from container level security context
---
## 7.1.0 - 2019/05/21
diff --git a/README.md b/README.md
index bceddafa8..1c2732d68 100644
--- a/README.md
+++ b/README.md
@@ -2,19 +2,22 @@
[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+master.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+master/)
-This functionality is in beta status and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but beta features are not subject to the support SLA of official GA features.
+This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
## Charts
Please look in the chart directories for the documentation for each chart. These helm charts are designed to be a lightweight way to configure our official docker images. Links to the relevant docker image documentation has also been added below.
-* [Elasticsearch](./elasticsearch/README.md) - [docker image docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html)
-* [Kibana](./kibana/README.md) - [docker image docs](https://www.elastic.co/guide/en/kibana/current/docker.html)
-* [Filebeat](./filebeat/README.md) - [docker image docs](https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html)
+| Chart | Docker documentation |
+| ------------------------------------------ | ------------------------------------------------------------------------------- |
+| [Elasticsearch](./elasticsearch/README.md) | https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html |
+| [Kibana](./kibana/README.md) | https://www.elastic.co/guide/en/kibana/current/docker.html |
+| [Filebeat](./filebeat/README.md) | https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html |
+| [Metricbeat](./metricbeat/README.md) | https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-docker.html |
## Kubernetes versions
-The charts are [currently tested](https://devops-ci.elastic.co/job/elastic+helm-charts+master/) against all GKE versions available.
+The charts are [currently tested](https://devops-ci.elastic.co/job/elastic+helm-charts+master/) against all GKE versions available. The exact versions are defined under `KUBERNETES_VERSIONS` in [helpers/matrix.yml](/helpers/matrix.yml)
diff --git a/elasticsearch/Chart.yaml b/elasticsearch/Chart.yaml
index fa959b280..7a4f95cd1 100755
--- a/elasticsearch/Chart.yaml
+++ b/elasticsearch/Chart.yaml
@@ -1,11 +1,11 @@
-description: Elasticsearch
+description: Official Elastic helm chart for Elasticsearch
home: https://github.com/elastic/helm-charts
maintainers:
- email: helm-charts@elastic.co
name: Elastic
name: elasticsearch
-version: 7.1.0
-appVersion: 7.1.0
+version: 7.3.0
+appVersion: 7.3.0
sources:
- https://github.com/elastic/elasticsearch
icon: https://helm.elastic.co/icons/elasticsearch.png
diff --git a/elasticsearch/README.md b/elasticsearch/README.md
index c429d2bb5..92a222611 100644
--- a/elasticsearch/README.md
+++ b/elasticsearch/README.md
@@ -1,6 +1,6 @@
# Elasticsearch Helm Chart
-This functionality is in beta status and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but beta features are not subject to the support SLA of official GA features.
+This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
This helm chart is a lightweight way to configure and run our official [Elasticsearch docker image](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html)
@@ -13,6 +13,7 @@ This helm chart is a lightweight way to configure and run our official [Elastics
* 1GB of RAM for the JVM heap
## Usage notes and getting started
+
* This repo includes a number of [example](./examples) configurations which can be used as a reference. They are also used in the automated testing of this chart
* Automated testing of this chart is currently only run against GKE (Google Kubernetes Engine). If you are using a different Kubernetes provider you will likely need to adjust the `storageClassName` in the `volumeClaimTemplate`
* The default storage class for GKE is `standard` which by default will give you `pd-ssd` type persistent volumes. This is network attached storage and will not perform as well as local storage. If you are using Kubernetes version 1.10 or greater you can use [Local PersistentVolumes](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) for increased performance
@@ -33,73 +34,84 @@ If you currently have a cluster deployed with the [helm/charts stable](https://g
```
* Install it
```
- helm install --name elasticsearch elastic/elasticsearch --version 7.1.0
+ helm install --name elasticsearch elastic/elasticsearch
```
## Compatibility
This chart is tested with the latest supported versions. The currently tested versions are:
-| 5.x | 6.x | 7.x |
-| ------ | ----- | ----- |
-| 5.6.16 | 6.8.0 | 7.1.0 |
+| 6.x | 7.x |
+| ----- | ----- |
+| 6.8.1 | 7.3.0 |
Examples of installing older major versions can be found in the [examples](./examples) directory.
-While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.1.0` of Elasticsearch it would look like this:
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.3.0` of Elasticsearch it would look like this:
```
-helm install --name elasticsearch elastic/elasticsearch --version 7.1.0 --set imageTag=7.1.0
+helm install --name elasticsearch elastic/elasticsearch --set imageTag=7.3.0
```
-
## Configuration
-| Parameter | Description | Default |
-| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
-| `clusterName` | This will be used as the Elasticsearch [cluster.name](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.name.html) and should be unique per cluster in the namespace | `elasticsearch` |
-| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` | `master` |
-| `masterService` | Optional. The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery](#clustering-and-node-discovery) for more information. | `` |
-| `roles` | A hash map with the [specific roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) for the node group | `master: true`
`data: true`
`ingest: true` |
-| `replicas` | Kubernetes replica count for the statefulset (i.e. how many pods) | `3` |
-| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/discovery-settings.html#minimum_master_nodes). Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7. | `2` |
-| `esMajorVersion` | Used to set major version specific configuration | `7` |
-| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
-| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
-| `extraVolumes` | Additional volumes to be passed to the `tpl` function | |
-| `extraVolumeMounts` | Additional volumeMounts to be passed to the `tpl` function | |
-| `extraInitContainers` | Additional init containers to be passed to the `tpl` function | |
-| `secretMounts` | Allows you easily mount a secret as a file inside the statefulset. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
-| `image` | The Elasticsearch docker image | `docker.elastic.co/elasticsearch/elasticsearch` |
-| `imageTag` | The Elasticsearch docker image tag | `7.1.0` |
-| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
-| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Elasticsearch pods | `{}` |
-| `esJavaOpts` | [Java options](https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html) for Elasticsearch. This is where you should configure the [jvm heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html) | `-Xmx1g -Xms1g` |
-| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the statefulset | `requests.cpu: 100m`
`requests.memory: 2Gi`
`limits.cpu: 1000m`
`limits.memory: 2Gi` |
-| `initResources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the initContainer in the statefulset | {} |
-| `networkHost` | Value for the [network.host Elasticsearch setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/network.host.html) | `0.0.0.0` |
-| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for statefulsets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage). You will want to adjust the storage (default `30Gi`) and the `storageClassName` if you are using a different storage class | `accessModes: [ "ReadWriteOnce" ]`
`resources.requests.storage: 30Gi` |
-| `persistence.annotations` | Additional persistence annotations for the `volumeClaimTemplate` | `{}` |
-| `persistence.enabled` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) which don't require persistent data. | `true` |
-| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `` |
-| `antiAffinityTopologyKey` | The [anti-affinity topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` |
-| `antiAffinity` | Setting this to hard enforces the [anti-affinity rules](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). If it is set to soft it will be done "best effort". Other values will be ignored. | `hard` |
-| `nodeAffinity` | Value for the [node affinity settings](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature) | `{}` |
-| `podManagementPolicy` | By default Kubernetes [deploys statefulsets serially](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies). This deploys them in parallel so that they can discover eachother | `Parallel` |
-| `protocol` | The protocol that will be used for the readinessProbe. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` |
-| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#_settings) in `extraEnvs` | `9200` |
-| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#_transport_settings) in `extraEnvs` | `9300` |
-| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) for the statefulset. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` |
-| `maxUnavailable` | The [maxUnavailable](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` |
-| `fsGroup` | The Group ID (GID) for [securityContext.fsGroup](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) so that the Elasticsearch user can read from the persistent volume | `1000` |
-| `terminationGracePeriod` | The [terminationGracePeriod](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) in seconds used when trying to stop the pod | `120` |
-| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html#vm-max-map-count) needed for Elasticsearch | `262144` |
-| `readinessProbe` | Configuration fields for the [readinessProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
-| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params) that will be used by readinessProbe command | `wait_for_status=green&timeout=1s` |
-| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` |
-| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) so that you can target specific nodes for your Elasticsearch cluster | `{}` |
-| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
-| `ingress` | Configurable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to expose the Elasticsearch service. See [`values.yaml`](./values.yaml) for an example | `enabled: false` |
+| Parameter | Description | Default |
+| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
+| `clusterName` | This will be used as the Elasticsearch [cluster.name](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.name.html) and should be unique per cluster in the namespace | `elasticsearch` |
+| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X` | `master` |
+| `masterService` | Optional. The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery](#clustering-and-node-discovery) for more information. | `` |
+| `roles` | A hash map with the [specific roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) for the node group | `master: true`
`data: true`
`ingest: true` |
+| `replicas` | Kubernetes replica count for the statefulset (i.e. how many pods) | `3` |
+| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/discovery-settings.html#minimum_master_nodes). Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7. | `2` |
+| `esMajorVersion` | Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. `esMajorVersion: 6`) | `""` |
+| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
+| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
+| `extraVolumes` | Templatable string of additional volumes to be passed to the `tpl` function | `""` |
+| `extraVolumeMounts` | Templatable string of additional volumeMounts to be passed to the `tpl` function | `""` |
+| `extraInitContainers` | Templatable string of additional init containers to be passed to the `tpl` function | `""` |
+| `secretMounts`                | Allows you to easily mount a secret as a file inside the statefulset. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example                                                                                                                                                    | `[]`                                                                                                                       |
+| `image` | The Elasticsearch docker image | `docker.elastic.co/elasticsearch/elasticsearch` |
+| `imageTag` | The Elasticsearch docker image tag | `7.3.0` |
+| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
+| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Elasticsearch pods | `{}` |
+| `labels` | Configurable [label](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) applied to all Elasticsearch pods | `{}` |
+| `esJavaOpts` | [Java options](https://www.elastic.co/guide/en/elasticsearch/reference/current/jvm-options.html) for Elasticsearch. This is where you should configure the [jvm heap size](https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html) | `-Xmx1g -Xms1g` |
+| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the statefulset | `requests.cpu: 100m`
`requests.memory: 2Gi`
`limits.cpu: 1000m`
`limits.memory: 2Gi` |
+| `initResources`               | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the initContainer in the statefulset                                                                                                                                                           | `{}`                                                                                                                       |
+| `sidecarResources`            | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the sidecar containers in the statefulset                                                                                                                                                      | `{}`                                                                                                                       |
+| `networkHost` | Value for the [network.host Elasticsearch setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/network.host.html) | `0.0.0.0` |
+| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for statefulsets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage). You will want to adjust the storage (default `30Gi`) and the `storageClassName` if you are using a different storage class | `accessModes: [ "ReadWriteOnce" ]`
`resources.requests.storage: 30Gi` |
+| `persistence.annotations` | Additional persistence annotations for the `volumeClaimTemplate` | `{}` |
+| `persistence.enabled` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) which don't require persistent data. | `true` |
+| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `""` |
+| `antiAffinityTopologyKey` | The [anti-affinity topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` |
+| `antiAffinity` | Setting this to hard enforces the [anti-affinity rules](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). If it is set to soft it will be done "best effort". Other values will be ignored. | `hard` |
+| `nodeAffinity` | Value for the [node affinity settings](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature) | `{}` |
+| `podManagementPolicy`         | By default Kubernetes [deploys statefulsets serially](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies). This deploys them in parallel so that they can discover each other                                                                                                   | `Parallel`                                                                                                                 |
+| `protocol` | The protocol that will be used for the readinessProbe. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` |
+| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#_settings) in `extraEnvs` | `9200` |
+| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-transport.html#_transport_settings) in `extraEnvs` | `9300` |
+| `service.type` | Type of elasticsearch service. [Service Types](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) | `ClusterIP` |
+| `service.nodePort` | Custom [nodePort](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport) port that can be set if you are using `service.type: nodePort`. | `` |
+| `service.annotations` | Annotations that Kubernetes will use for the service. This will configure load balancer if `service.type` is `LoadBalancer` [Annotations](https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws) | `{}` |
+| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) for the statefulset. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` |
+| `maxUnavailable` | The [maxUnavailable](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` |
+| `fsGroup (DEPRECATED)` | The Group ID (GID) for [securityContext.fsGroup](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) so that the Elasticsearch user can read from the persistent volume | `` |
+| `podSecurityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) for the pod | `fsGroup: 1000` |
+| `securityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) for the container | `capabilities.drop:[ALL]`
`runAsNonRoot: true`
`runAsUser: 1000` |
+| `terminationGracePeriod` | The [terminationGracePeriod](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) in seconds used when trying to stop the pod | `120` |
+| `sysctlInitContainer.enabled` | Allows you to disable the sysctlInitContainer if you are setting vm.max_map_count with another method | `true` |
+| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html#vm-max-map-count) needed for Elasticsearch | `262144` |
+| `readinessProbe` | Configuration fields for the [readinessProbe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
+| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params) that will be used by readinessProbe command | `wait_for_status=green&timeout=1s` |
+| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` |
+| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) so that you can target specific nodes for your Elasticsearch cluster | `{}` |
+| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
+| `ingress` | Configurable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to expose the Elasticsearch service. See [`values.yaml`](./values.yaml) for an example | `enabled: false` |
+| `schedulerName` | Name of the [alternate scheduler](https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods) | `nil` |
+| `masterTerminationFix` | A workaround needed for Elasticsearch < 7.2 to prevent master status being lost during restarts [#63](https://github.com/elastic/helm-charts/issues/63) | `false` |
+| `lifecycle` | Allows you to add lifecycle configuration. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
+| `keystore`                    | Allows you to map Kubernetes secrets into the keystore. See the [config example](/elasticsearch/examples/config/values.yaml) and [how to use the keystore](#how-to-use-the-keystore)                                                                                                                                         | `[]`                                                                                                                       |
| `rbac` | Configuration for creating a role, role binding and service account as part of this helm chart with `create: true`. Also can be used to reference an external service account with `serviceAccountName: "externalServiceAccountName"`. | `create: false`
`serviceAccountName: ""`
| `podSecurityPolicy` | Configuration for create a pod security policy with minimal permissions to run this Helm chart with `create: true`. Also can be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | `create: false`
`name: ""`
@@ -127,32 +139,15 @@ make
### Security
-A cluster with X-Pack security enabled
+A cluster with node to node security and https enabled. This example uses autogenerated certificates and password; for a production deployment you will want to generate SSL certificates following the [official docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-tls.html#node-certificates).
-* Generate SSL certificates following the [official docs]( https://www.elastic.co/guide/en/elasticsearch/reference/6.7/configuring-tls.html#node-certificates)
-* Create Kubernetes secrets for authentication credentials and certificates
- ```
- kubectl create secret generic elastic-credentials --from-literal=password=changeme --from-literal=username=elastic
- kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12
- ```
-* Deploy!
+* Generate the certificates and install Elasticsearch
```
cd examples/security
make
- ```
-* Attach into one of the containers
- ```
- kubectl exec -ti $(kubectl get pods -l release=helm-es-security -o name | awk -F'/' '{ print $NF }' | head -n 1) bash
- ```
-* Test that authentication is now enabled
- ```
- curl 'http://localhost:9200/' # This one will fail
- curl -u elastic:changeme 'http://localhost:9200/'
- ```
-* Install some test data to play around with
- ```
- wget https://download.elastic.co/demos/kibana/gettingstarted/logs.jsonl.gz && gunzip logs.jsonl.gz && curl -u elastic:changeme -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/_bulk?pretty' --data-binary @logs.jsonl
+ # Run a curl command to interact with the cluster
+ kubectl exec -ti security-master-0 -- sh -c 'curl -u $ELASTIC_USERNAME:$ELASTIC_PASSWORD -k https://localhost:9200/_cluster/health?pretty'
```
### FAQ
@@ -179,19 +174,62 @@ There are a couple reasons we recommend this.
#### How to use the keystore?
-1. Create a Kubernetes secret containing the [keystore](https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html)
- ```
- $ kubectl create secret generic elasticsearch-keystore --from-file=./elasticsearch.keystore
- ```
-2. Mount it into the container via `secretMounts`
- ```
- secretMounts:
- - name: elasticsearch-keystore
- secretName: elasticsearch-keystore
- path: /usr/share/elasticsearch/config/elasticsearch.keystore
- subPath: elasticsearch.keystore
- ```
+##### Basic example
+
+Create the secret. The key name needs to be the keystore key path. In this example we will create a secret from a file and from a literal string.
+
+```
+kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+To add these secrets to the keystore:
+```
+keystore:
+  - secretName: encryption-key
+  - secretName: slack-hook
+```
+
+##### Multiple keys
+
+All keys in the secret will be added to the keystore. To create the previous example in one secret you could also do:
+
+```
+kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+```
+keystore:
+  - secretName: keystore-secrets
+```
+
+##### Custom paths and keys
+
+If you are using these secrets for other applications (besides the Elasticsearch keystore) then it is also possible to specify the keystore path and which keys you want to add. Everything specified under each `keystore` item will be passed through to the `volumeMounts` section for [mounting the secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets). In this example we will only add the `slack_hook` key from a secret that also has other keys. Our secret looks like this:
+
+```
+kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+We only want to add the `slack_hook` key to the keystore at path `xpack.notification.slack.account.monitoring.secure_url`.
+
+```
+keystore:
+  - secretName: slack-secrets
+ items:
+ - key: slack_hook
+ path: xpack.notification.slack.account.monitoring.secure_url
+```
+
+You can also take a look at the [config example](/elasticsearch/examples/config/) which is used as part of the automated testing pipeline.
+
+#### How to enable snapshotting?
+
+1. Install your [snapshot plugin](https://www.elastic.co/guide/en/elasticsearch/plugins/current/repository.html) into a custom docker image following the [how to install plugins guide](/elasticsearch/README.md#how-to-install-plugins)
+2. Add any required secrets or credentials into an Elasticsearch keystore following the [how to use the keystore guide](/elasticsearch/README.md#how-to-use-the-keystore)
+3. Configure the [snapshot repository](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html) as you normally would.
+4. To automate snapshots you can use a tool like [curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/snapshot.html). In the future there are plans to have Elasticsearch manage automated snapshots with [Snapshot Lifecycle Management](https://github.com/elastic/elasticsearch/issues/38461).
### Local development environments
@@ -213,7 +251,6 @@ make
Note that if `helm` or `kubectl` timeouts occur, you may consider creating a minikube VM with more CPU cores or memory allocated.
-
#### Docker for Mac - Kubernetes
It is also possible to run this chart with the built in Kubernetes cluster that comes with [docker-for-mac](https://docs.docker.com/docker-for-mac/kubernetes/).
@@ -261,6 +298,7 @@ make test
Integration tests are run using [goss](https://github.com/aelsabbahy/goss/blob/master/docs/manual.md) which is a serverspec like tool written in golang. See [goss.yaml](examples/default/test/goss.yaml) for an example of what the tests look like.
To run the goss tests against the default example:
+
```
cd examples/default
make goss
diff --git a/elasticsearch/examples/5.x/Makefile b/elasticsearch/examples/5.x/Makefile
deleted file mode 100644
index 5c58bfed3..000000000
--- a/elasticsearch/examples/5.x/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-default: test
-
-include ../../../helpers/examples.mk
-
-RELEASE := helm-es-fivex
-
-install:
- helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ ; \
-
-restart:
- helm upgrade --set terminationGracePeriod=121 --wait --timeout=600 --install $(RELEASE) ../../ ; \
-
-purge:
- helm del --purge $(RELEASE)
-
-secrets:
- kubectl delete secrets elastic-fivex-credentials || true
- kubectl create secret generic elastic-fivex-credentials --from-literal=password=changeme --from-literal=username=elastic
-
-test: secrets install goss
diff --git a/elasticsearch/examples/5.x/test/goss.yaml b/elasticsearch/examples/5.x/test/goss.yaml
deleted file mode 100644
index f9afdeb74..000000000
--- a/elasticsearch/examples/5.x/test/goss.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-http:
- http://localhost:9200/_cluster/health:
- status: 200
- timeout: 2000
- username: '{{ .Env.ELASTIC_USERNAME }}'
- password: '{{ .Env.ELASTIC_PASSWORD }}'
- body:
- - 'green'
- - '"number_of_nodes":3'
- - '"number_of_data_nodes":3'
-
- http://localhost:9200/:
- status: 200
- timeout: 2000
- username: '{{ .Env.ELASTIC_USERNAME }}'
- password: '{{ .Env.ELASTIC_PASSWORD }}'
- body:
- - '"number" : "5.6.16"'
- - '"cluster_name" : "fivex"'
- - '"name" : "fivex-master-0"'
- - 'You Know, for Search'
diff --git a/elasticsearch/examples/5.x/values.yaml b/elasticsearch/examples/5.x/values.yaml
deleted file mode 100644
index 6736b650e..000000000
--- a/elasticsearch/examples/5.x/values.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-clusterName: "fivex"
-imageTag: "5.6.16"
-esMajorVersion: 5
-
-extraEnvs:
- - name: ELASTIC_PASSWORD
- valueFrom:
- secretKeyRef:
- name: elastic-fivex-credentials
- key: password
- - name: ELASTIC_USERNAME
- valueFrom:
- secretKeyRef:
- name: elastic-fivex-credentials
- key: username
diff --git a/elasticsearch/examples/6.x/test/goss.yaml b/elasticsearch/examples/6.x/test/goss.yaml
index f06a0853e..cf6ea4200 100644
--- a/elasticsearch/examples/6.x/test/goss.yaml
+++ b/elasticsearch/examples/6.x/test/goss.yaml
@@ -11,7 +11,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number" : "6.8.0"'
+ - '"number" : "6.8.1"'
- '"cluster_name" : "six"'
- '"name" : "six-master-0"'
- 'You Know, for Search'
diff --git a/elasticsearch/examples/6.x/values.yaml b/elasticsearch/examples/6.x/values.yaml
index 67bc5d833..520335482 100644
--- a/elasticsearch/examples/6.x/values.yaml
+++ b/elasticsearch/examples/6.x/values.yaml
@@ -1,5 +1,4 @@
---
clusterName: "six"
-imageTag: "6.8.0"
-esMajorVersion: 6
+imageTag: "6.8.1"
diff --git a/elasticsearch/examples/config/Makefile b/elasticsearch/examples/config/Makefile
new file mode 100644
index 000000000..cf9c1f441
--- /dev/null
+++ b/elasticsearch/examples/config/Makefile
@@ -0,0 +1,19 @@
+default: test
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-config
+
+install:
+ helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ ; \
+
+secrets:
+ kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
+ kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
+ kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+ kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+ kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
+
+test: secrets install goss
+
+purge:
+ helm del --purge $(RELEASE)
diff --git a/elasticsearch/examples/config/README.md b/elasticsearch/examples/config/README.md
new file mode 100644
index 000000000..d98d836bf
--- /dev/null
+++ b/elasticsearch/examples/config/README.md
@@ -0,0 +1,3 @@
+# Config
+
+An example testing suite for testing some of the optional features of this chart.
diff --git a/elasticsearch/examples/config/test/goss.yaml b/elasticsearch/examples/config/test/goss.yaml
new file mode 100644
index 000000000..848701370
--- /dev/null
+++ b/elasticsearch/examples/config/test/goss.yaml
@@ -0,0 +1,26 @@
+http:
+ http://localhost:9200/_cluster/health:
+ status: 200
+ timeout: 2000
+ body:
+ - 'green'
+ - '"number_of_nodes":1'
+ - '"number_of_data_nodes":1'
+
+ http://localhost:9200:
+ status: 200
+ timeout: 2000
+ body:
+ - '"cluster_name" : "config"'
+ - '"name" : "config-master-0"'
+ - 'You Know, for Search'
+
+command:
+ "elasticsearch-keystore list":
+ exit-status: 0
+ stdout:
+ - keystore.seed
+ - bootstrap.password
+ - xpack.notification.slack.account.monitoring.secure_url
+ - xpack.notification.slack.account.otheraccount.secure_url
+ - xpack.watcher.encryption_key
diff --git a/elasticsearch/examples/config/values.yaml b/elasticsearch/examples/config/values.yaml
new file mode 100644
index 000000000..ebde4f4d9
--- /dev/null
+++ b/elasticsearch/examples/config/values.yaml
@@ -0,0 +1,31 @@
+---
+
+clusterName: "config"
+replicas: 1
+
+extraEnvs:
+ - name: ELASTIC_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: elastic-credentials
+ key: password
+ - name: ELASTIC_USERNAME
+ valueFrom:
+ secretKeyRef:
+ name: elastic-credentials
+ key: username
+
+# This is just a dummy file to make sure that
+# the keystore can be mounted at the same time
+# as a custom elasticsearch.yml
+esConfig:
+ elasticsearch.yml: |
+ path.data: /usr/share/elasticsearch/data
+
+keystore:
+ - secretName: elastic-config-secret
+ - secretName: elastic-config-slack
+ - secretName: elastic-config-custom-path
+ items:
+ - key: slack_url
+ path: xpack.notification.slack.account.otheraccount.secure_url
diff --git a/elasticsearch/examples/config/watcher_encryption_key b/elasticsearch/examples/config/watcher_encryption_key
new file mode 100644
index 000000000..b5f907866
--- /dev/null
+++ b/elasticsearch/examples/config/watcher_encryption_key
@@ -0,0 +1 @@
+supersecret
diff --git a/elasticsearch/examples/default/test/goss.yaml b/elasticsearch/examples/default/test/goss.yaml
index 29e29dd98..d2c59dd7e 100644
--- a/elasticsearch/examples/default/test/goss.yaml
+++ b/elasticsearch/examples/default/test/goss.yaml
@@ -1,15 +1,9 @@
-port:
- tcp6:9200:
- listening: true
- ip:
- - '::'
-
kernel-param:
vm.max_map_count:
value: '262144'
http:
- http://localhost:9200/_cluster/health:
+ http://elasticsearch-master:9200/_cluster/health:
status: 200
timeout: 2000
body:
@@ -21,7 +15,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number" : "7.1.0"'
+ - '"number" : "7.3.0"'
- '"cluster_name" : "elasticsearch"'
- '"name" : "elasticsearch-master-0"'
- 'You Know, for Search'
diff --git a/elasticsearch/examples/oss/test/goss.yaml b/elasticsearch/examples/oss/test/goss.yaml
index 305b16a45..769a6687f 100644
--- a/elasticsearch/examples/oss/test/goss.yaml
+++ b/elasticsearch/examples/oss/test/goss.yaml
@@ -11,7 +11,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number" : "7.1.0"'
+ - '"number" : "7.3.0"'
- '"cluster_name" : "oss"'
- '"name" : "oss-master-0"'
- 'You Know, for Search'
diff --git a/elasticsearch/examples/security/Makefile b/elasticsearch/examples/security/Makefile
index 881908c07..827bdc56c 100644
--- a/elasticsearch/examples/security/Makefile
+++ b/elasticsearch/examples/security/Makefile
@@ -8,15 +8,24 @@ install:
helm upgrade --wait --timeout=600 --install --values ./security.yml $(RELEASE) ../../ ; \
purge:
+ kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem || true
helm del --purge $(RELEASE)
test: secrets install goss
secrets:
- kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem || true && \
- vault read -field=value secret/devops-ci/helm-charts/elasticsearch/security/certificates | base64 --decode > elastic-certificates.p12 && \
- vault read -field=value secret/devops-ci/helm-charts/elasticsearch/security/certificate-pem | base64 --decode > elastic-certificate.pem && \
- kubectl create secret generic elastic-credentials --from-literal=password=changeme --from-literal=username=elastic && \
+ docker rm -f elastic-helm-charts-certs || true
+ rm -f elastic-certificates.p12 elastic-certificate.pem elastic-stack-ca.p12 || true
+ password=$$([ ! -z "$$ELASTIC_PASSWORD" ] && echo $$ELASTIC_PASSWORD || echo $$(docker run --rm docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) /bin/sh -c "< /dev/urandom tr -cd '[:alnum:]' | head -c20")) && \
+ docker run --name elastic-helm-charts-certs -i -w /app \
+ docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) \
+ /bin/sh -c " \
+ elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \
+ elasticsearch-certutil cert --name security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12" && \
+ docker cp elastic-helm-charts-certs:/app/elastic-certificates.p12 ./ && \
+ docker rm -f elastic-helm-charts-certs && \
+ openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \
kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \
kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \
- rm -f elastic-certificates.p12 elastic-certificate.pem
+ kubectl create secret generic elastic-credentials --from-literal=password=$$password --from-literal=username=elastic && \
+ rm -f elastic-certificates.p12 elastic-certificate.pem elastic-stack-ca.p12
diff --git a/elasticsearch/examples/security/test/goss.yaml b/elasticsearch/examples/security/test/goss.yaml
index a2777635c..c6d4b987b 100644
--- a/elasticsearch/examples/security/test/goss.yaml
+++ b/elasticsearch/examples/security/test/goss.yaml
@@ -1,5 +1,5 @@
http:
- https://localhost:9200/_cluster/health:
+ https://security-master:9200/_cluster/health:
status: 200
timeout: 2000
allow-insecure: true
diff --git a/elasticsearch/examples/upgrade/test/goss.yaml b/elasticsearch/examples/upgrade/test/goss.yaml
index 95ad14304..ac71ba348 100644
--- a/elasticsearch/examples/upgrade/test/goss.yaml
+++ b/elasticsearch/examples/upgrade/test/goss.yaml
@@ -11,7 +11,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number" : "7.1.0"'
+ - '"number" : "7.3.0"'
- '"cluster_name" : "upgrade"'
- '"name" : "upgrade-master-0"'
- 'You Know, for Search'
diff --git a/elasticsearch/templates/_helpers.tpl b/elasticsearch/templates/_helpers.tpl
index bb50c198f..6ca92a5a3 100755
--- a/elasticsearch/templates/_helpers.tpl
+++ b/elasticsearch/templates/_helpers.tpl
@@ -34,3 +34,16 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{ $uname }}-{{ $i }},
{{- end -}}
{{- end -}}
+
+{{- define "esMajorVersion" -}}
+{{- if .Values.esMajorVersion -}}
+{{ .Values.esMajorVersion }}
+{{- else -}}
+{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}}
+ {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}}
+{{ $version }}
+ {{- else -}}
+7
+ {{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/elasticsearch/templates/configmap.yaml b/elasticsearch/templates/configmap.yaml
index fe5f0c3b8..78074a804 100644
--- a/elasticsearch/templates/configmap.yaml
+++ b/elasticsearch/templates/configmap.yaml
@@ -1,5 +1,4 @@
{{- if .Values.esConfig }}
----
apiVersion: v1
kind: ConfigMap
metadata:
diff --git a/elasticsearch/templates/service.yaml b/elasticsearch/templates/service.yaml
index 6bef0f90c..8342bb65e 100644
--- a/elasticsearch/templates/service.yaml
+++ b/elasticsearch/templates/service.yaml
@@ -3,7 +3,15 @@ kind: Service
apiVersion: v1
metadata:
name: {{ template "uname" . }}
+ labels:
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ app: "{{ template "uname" . }}"
+ annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
spec:
+ type: {{ .Values.service.type }}
selector:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
@@ -13,6 +21,9 @@ spec:
- name: http
protocol: TCP
port: {{ .Values.httpPort }}
+{{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+{{- end }}
- name: transport
protocol: TCP
port: {{ .Values.transportPort }}
@@ -27,10 +38,11 @@ metadata:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
app: "{{ template "uname" . }}"
annotations:
- # Create endpoints also if the related pod isn't ready
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
+ # Create endpoints also if the related pod isn't ready
+ publishNotReadyAddresses: true
selector:
app: "{{ template "uname" . }}"
ports:
diff --git a/elasticsearch/templates/statefulset.yaml b/elasticsearch/templates/statefulset.yaml
index cd8bb4341..09637b2f5 100644
--- a/elasticsearch/templates/statefulset.yaml
+++ b/elasticsearch/templates/statefulset.yaml
@@ -8,6 +8,11 @@ metadata:
release: {{ .Release.Name | quote }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
app: "{{ template "uname" . }}"
+ {{- range $key, $value := .Values.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ annotations:
+ esMajorVersion: "{{ include "esMajorVersion" . }}"
spec:
serviceName: {{ template "uname" . }}-headless
selector:
@@ -23,7 +28,7 @@ spec:
name: {{ template "uname" . }}
{{- with .Values.persistence.annotations }}
annotations:
- {{ toYaml . | indent 4 }}
+{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{ toYaml .Values.volumeClaimTemplate | indent 6 }}
@@ -45,8 +50,14 @@ spec:
configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
{{- end }}
spec:
+ {{- if .Values.schedulerName }}
+ schedulerName: "{{ .Values.schedulerName }}"
+ {{- end }}
securityContext:
- fsGroup: {{ .Values.fsGroup }}
+{{ toYaml .Values.podSecurityContext | indent 8 }}
+ {{- if .Values.fsGroup }}
+ fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup
+ {{- end }}
{{- if .Values.rbac.create }}
serviceAccountName: "{{ template "uname" . }}"
{{- else if not (eq .Values.rbac.serviceAccountName "") }}
@@ -98,21 +109,30 @@ spec:
{{- range .Values.secretMounts }}
- name: {{ .name }}
secret:
- secretName: {{ .name }}
+ secretName: {{ .secretName }}
{{- end }}
{{- if .Values.esConfig }}
- name: esconfig
configMap:
name: {{ template "uname" . }}-config
{{- end }}
+{{- if .Values.keystore }}
+ - name: keystore
+ emptyDir: {}
+ {{- range .Values.keystore }}
+ - name: keystore-{{ .secretName }}
+ secret: {{ toYaml . | nindent 12 }}
+ {{- end }}
+{{ end }}
{{- if .Values.extraVolumes }}
-{{ tpl .Values.extraVolumes . | indent 6 }}
+{{ tpl .Values.extraVolumes . | indent 8 }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
initContainers:
+ {{- if .Values.sysctlInitContainer.enabled }}
- name: configure-sysctl
securityContext:
runAsUser: 0
@@ -121,11 +141,46 @@ spec:
command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"]
resources:
{{ toYaml .Values.initResources | indent 10 }}
+ {{- end }}
+{{ if .Values.keystore }}
+ - name: keystore
+ image: "{{ .Values.image }}:{{ .Values.imageTag }}"
+ command:
+ - sh
+ - -c
+ - |
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ elasticsearch-keystore create
+
+ for i in /tmp/keystoreSecrets/*/*; do
+ key=$(basename $i)
+ echo "Adding file $i to keystore key $key"
+ elasticsearch-keystore add-file "$key" "$i"
+ done
+
+ # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup
+ [ ! -z "$ELASTIC_PASSWORD" ] && echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password
+
+ cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/
+ env: {{ toYaml .Values.extraEnvs | nindent 10 }}
+ resources: {{ toYaml .Values.initResources | nindent 10 }}
+ volumeMounts:
+ - name: keystore
+ mountPath: /tmp/keystore
+ {{- range .Values.keystore }}
+ - name: keystore-{{ .secretName }}
+ mountPath: /tmp/keystoreSecrets/{{ .secretName }}
+ {{- end }}
+{{ end }}
{{- if .Values.extraInitContainers }}
{{ tpl .Values.extraInitContainers . | indent 6 }}
{{- end }}
containers:
- name: "{{ template "name" . }}"
+ securityContext:
+{{ toYaml .Values.securityContext | indent 10 }}
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
imagePullPolicy: "{{ .Values.imagePullPolicy }}"
readinessProbe:
@@ -176,7 +231,7 @@ spec:
fieldRef:
fieldPath: metadata.name
{{- if eq .Values.roles.master "true" }}
- {{- if ge (int .Values.esMajorVersion) 7 }}
+ {{- if ge (int (include "esMajorVersion" .)) 7 }}
- name: cluster.initial_master_nodes
value: "{{ template "endpoints" .Values }}"
{{- else }}
@@ -184,7 +239,7 @@ spec:
value: "{{ .Values.minimumMasterNodes }}"
{{- end }}
{{- end }}
- {{- if lt (int .Values.esMajorVersion) 7 }}
+ {{- if lt (int (include "esMajorVersion" .)) 7 }}
- name: discovery.zen.ping.unicast.hosts
value: "{{ template "masterService" . }}-headless"
{{- else }}
@@ -209,6 +264,11 @@ spec:
- name: "{{ template "uname" . }}"
mountPath: /usr/share/elasticsearch/data
{{- end }}
+{{ if .Values.keystore }}
+ - name: keystore
+ mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore
+ subPath: elasticsearch.keystore
+{{ end }}
{{- range .Values.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
@@ -224,6 +284,7 @@ spec:
{{- if .Values.extraVolumeMounts }}
{{ tpl .Values.extraVolumeMounts . | indent 10 }}
{{- end }}
+ {{- if .Values.masterTerminationFix }}
{{- if eq .Values.roles.master "true" }}
# This sidecar will prevent slow master re-election
# https://github.com/elastic/helm-charts/issues/63
@@ -249,8 +310,8 @@ spec:
cleanup () {
while true ; do
- local master="$(http "/_cat/master?h=node")"
- if [[ $master == "{{ template "uname" . }}"* && $master != "${NODE_NAME}" ]]; then
+ local master="$(http "/_cat/master?h=node" || echo "")"
+ if [[ $master == "{{ template "masterService" . }}"* && $master != "${NODE_NAME}" ]]; then
echo "This node is not master."
break
fi
@@ -265,6 +326,8 @@ spec:
sleep infinity &
wait $!
+ resources:
+{{ toYaml .Values.sidecarResources | indent 10 }}
env:
- name: NODE_NAME
valueFrom:
@@ -274,3 +337,8 @@ spec:
{{ toYaml .Values.extraEnvs | indent 10 }}
{{- end }}
{{- end }}
+ {{- end }}
+{{- if .Values.lifecycle }}
+ lifecycle:
+{{ toYaml .Values.lifecycle | indent 10 }}
+{{- end }}
diff --git a/elasticsearch/tests/elasticsearch_test.py b/elasticsearch/tests/elasticsearch_test.py
index 8bbe4f362..daeaa408e 100755
--- a/elasticsearch/tests/elasticsearch_test.py
+++ b/elasticsearch/tests/elasticsearch_test.py
@@ -153,6 +153,8 @@ def test_defaults():
# Service
s = r['service'][uname]
assert s['metadata']['name'] == uname
+ assert s['metadata']['annotations'] == {}
+ assert s['spec']['type'] == 'ClusterIP'
assert len(s['spec']['ports']) == 2
assert s['spec']['ports'][0] == {
'name': 'http', 'port': 9200, 'protocol': 'TCP'}
@@ -162,6 +164,7 @@ def test_defaults():
# Headless Service
h = r['service'][uname + '-headless']
assert h['spec']['clusterIP'] == 'None'
+ assert h['spec']['publishNotReadyAddresses'] == True
assert h['spec']['ports'][0]['name'] == 'http'
assert h['spec']['ports'][0]['port'] == 9200
assert h['spec']['ports'][1]['name'] == 'transport'
@@ -332,6 +335,23 @@ def test_adding_a_extra_init_container():
extraInitContainer = r['statefulset'][uname]['spec']['template']['spec']['initContainers']
assert {'name': 'do-something', 'image': 'busybox', 'command': ['do', 'something'], } in extraInitContainer
+def test_sysctl_init_container_disabled():
+ config = '''
+sysctlInitContainer:
+ enabled: false
+'''
+ r = helm_template(config)
+ initContainers = r['statefulset'][uname]['spec']['template']['spec']['initContainers']
+ assert initContainers is None
+
+def test_sysctl_init_container_enabled():
+ config = '''
+sysctlInitContainer:
+ enabled: true
+'''
+ r = helm_template(config)
+ initContainers = r['statefulset'][uname]['spec']['template']['spec']['initContainers']
+ assert initContainers[0]['name'] == 'configure-sysctl'
def test_adding_storageclass_annotation_to_volumeclaimtemplate():
config = '''
@@ -341,14 +361,27 @@ def test_adding_storageclass_annotation_to_volumeclaimtemplate():
'''
r = helm_template(config)
annotations = r['statefulset'][uname]['spec']['volumeClaimTemplates'][0]['metadata']['annotations']
- assert {'volume.beta.kubernetes.io/storage-class': 'id'} == annotations
+ assert annotations['volume.beta.kubernetes.io/storage-class'] == 'id'
+
+def test_adding_multiple_persistence_annotations():
+ config = '''
+ persistence:
+ annotations:
+ hello: world
+ world: hello
+ '''
+ r = helm_template(config)
+ annotations = r['statefulset'][uname]['spec']['volumeClaimTemplates'][0]['metadata']['annotations']
+
+ assert annotations['hello'] == 'world'
+ assert annotations['world'] == 'hello'
def test_adding_a_secret_mount():
config = '''
secretMounts:
- name: elastic-certificates
- secretName: elastic-certificates
+ secretName: elastic-certs
path: /usr/share/elasticsearch/config/certs
'''
r = helm_template(config)
@@ -360,7 +393,7 @@ def test_adding_a_secret_mount():
assert s['volumes'] == [{
'name': 'elastic-certificates',
'secret': {
- 'secretName': 'elastic-certificates'
+ 'secretName': 'elastic-certs'
}
}]
@@ -369,7 +402,7 @@ def test_adding_a_secret_mount_with_subpath():
config = '''
secretMounts:
- name: elastic-certificates
- secretName: elastic-certificates
+ secretName: elastic-certs
path: /usr/share/elasticsearch/config/certs
subPath: cert.crt
'''
@@ -444,6 +477,31 @@ def test_adding_resources_to_initcontainer():
}
}
+def test_adding_resources_to_sidecar_container():
+ config = '''
+masterTerminationFix: true
+sidecarResources:
+ limits:
+ cpu: "100m"
+ memory: "128Mi"
+ requests:
+ cpu: "100m"
+ memory: "128Mi"
+'''
+ r = helm_template(config)
+ i = r['statefulset'][uname]['spec']['template']['spec']['containers'][1]
+
+ assert i['resources'] == {
+ 'requests': {
+ 'cpu': '100m',
+ 'memory': '128Mi'
+ },
+ 'limits': {
+ 'cpu': '100m',
+ 'memory': '128Mi'
+ }
+ }
+
def test_adding_a_node_affinity():
config = '''
nodeAffinity:
@@ -572,6 +630,7 @@ def test_priority_class_name():
assert priority_class_name == "highest"
+<<<<<<< HEAD
def test_pod_security_policy():
## Make sure the default config is not creating any resources
config = '''
@@ -649,3 +708,275 @@ def test_external_service_account():
# When referencing an external service account we do not want any resources to be created.
for resource in resources:
assert resource not in r
+=======
+def test_scheduler_name():
+ r = helm_template('')
+ spec = r['statefulset'][uname]['spec']['template']['spec']
+ assert 'schedulerName' not in spec
+
+ config = '''
+schedulerName: "stork"
+'''
+ r = helm_template(config)
+ assert r['statefulset'][uname]['spec']['template']['spec']['schedulerName'] == "stork"
+
+
+def test_adding_a_nodePort():
+ config = ''
+
+ r = helm_template(config)
+
+ assert 'nodePort' not in r['service'][uname]['spec']['ports'][0]
+
+ config = '''
+ service:
+ nodePort: 30001
+ '''
+
+ r = helm_template(config)
+
+ assert r['service'][uname]['spec']['ports'][0]['nodePort'] == 30001
+
+def test_master_termination_fixed_enabled():
+ config = ''
+
+ r = helm_template(config)
+
+ assert len(r['statefulset'][uname]['spec']['template']['spec']['containers']) == 1
+
+ config = '''
+ masterTerminationFix: true
+ '''
+
+ r = helm_template(config)
+
+ c = r['statefulset'][uname]['spec']['template']['spec']['containers'][1]
+ assert c['name'] == 'elasticsearch-master-graceful-termination-handler'
+
+def test_lifecycle_hooks():
+ config = ''
+ r = helm_template(config)
+ c = r['statefulset'][uname]['spec']['template']['spec']['containers'][0]
+ assert 'lifecycle' not in c
+
+ config = '''
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash","/preStop"]
+ '''
+ r = helm_template(config)
+ c = r['statefulset'][uname]['spec']['template']['spec']['containers'][0]
+
+ assert c['lifecycle']['preStop']['exec']['command'] == ["/bin/bash","/preStop"]
+
+def test_esMajorVersion_detect_default_version():
+ config = ''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '7'
+
+def test_esMajorVersion_default_to_7_if_not_elastic_image():
+ config = '''
+ image: notElastic
+ imageTag: 1.0.0
+ '''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '7'
+
+def test_esMajorVersion_default_to_7_if_no_version_is_found():
+ config = '''
+ imageTag: not_a_number
+ '''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '7'
+
+def test_esMajorVersion_set_to_6_based_on_image_tag():
+ config = '''
+ imageTag: 6.8.1
+ '''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '6'
+
+def test_esMajorVersion_always_wins():
+ config = '''
+ esMajorVersion: 7
+ imageTag: 6.0.0
+ '''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '7'
+
+def test_esMajorVersion_parse_image_tag_for_oss_image():
+ config = '''
+ image: docker.elastic.co/elasticsearch/elasticsearch-oss
+ imageTag: 6.3.2
+ '''
+
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['annotations']['esMajorVersion'] == '6'
+
+def test_set_pod_security_context():
+ config = ''
+ r = helm_template(config)
+ assert r['statefulset'][uname]['spec']['template']['spec']['securityContext']['fsGroup'] == 1000
+
+ config = '''
+ podSecurityContext:
+ fsGroup: 1001
+ other: test
+ '''
+
+ r = helm_template(config)
+
+ assert r['statefulset'][uname]['spec']['template']['spec']['securityContext']['fsGroup'] == 1001
+ assert r['statefulset'][uname]['spec']['template']['spec']['securityContext']['other'] == 'test'
+
+def test_fsGroup_backwards_compatability():
+ config = '''
+ fsGroup: 1001
+ '''
+
+ r = helm_template(config)
+
+ assert r['statefulset'][uname]['spec']['template']['spec']['securityContext']['fsGroup'] == 1001
+
+def test_set_container_security_context():
+ config = ''
+
+ r = helm_template(config)
+ c = r['statefulset'][uname]['spec']['template']['spec']['containers'][0]
+ assert c['securityContext']['capabilities']['drop'] == ['ALL']
+ assert c['securityContext']['runAsNonRoot'] == True
+ assert c['securityContext']['runAsUser'] == 1000
+
+ config = '''
+ securityContext:
+ runAsUser: 1001
+ other: test
+ '''
+
+ r = helm_template(config)
+ c = r['statefulset'][uname]['spec']['template']['spec']['containers'][0]
+ assert c['securityContext']['capabilities']['drop'] == ['ALL']
+ assert c['securityContext']['runAsNonRoot'] == True
+ assert c['securityContext']['runAsUser'] == 1001
+ assert c['securityContext']['other'] == 'test'
+
+def test_adding_pod_labels():
+ config = '''
+labels:
+ app.kubernetes.io/name: elasticsearch
+'''
+ r = helm_template(config)
+ assert r['statefulset'][uname]['metadata']['labels']['app.kubernetes.io/name'] == 'elasticsearch'
+
+def test_keystore_enable():
+ config = ''
+
+ r = helm_template(config)
+ s = r['statefulset'][uname]['spec']['template']['spec']
+
+ assert s['volumes'] == None
+
+ config = '''
+keystore:
+ - secretName: test
+ '''
+
+ r = helm_template(config)
+ s = r['statefulset'][uname]['spec']['template']['spec']
+
+ assert {'name': 'keystore', 'emptyDir': {}} in s['volumes']
+
+def test_keystore_init_container():
+ config = ''
+
+ r = helm_template(config)
+ i = r['statefulset'][uname]['spec']['template']['spec']['initContainers'][-1]
+
+ assert i['name'] != 'keystore'
+
+ config = '''
+keystore:
+ - secretName: test
+ '''
+
+ r = helm_template(config)
+ i = r['statefulset'][uname]['spec']['template']['spec']['initContainers'][-1]
+
+ assert i['name'] == 'keystore'
+
+def test_keystore_mount():
+ config = '''
+keystore:
+ - secretName: test
+'''
+
+ r = helm_template(config)
+ s = r['statefulset'][uname]['spec']['template']['spec']
+ assert s['containers'][0]['volumeMounts'][-1] == {
+ 'mountPath': '/usr/share/elasticsearch/config/elasticsearch.keystore',
+ 'subPath': 'elasticsearch.keystore',
+ 'name': 'keystore'
+ }
+
+def test_keystore_init_volume_mounts():
+ config = '''
+keystore:
+ - secretName: test
+ - secretName: test-with-custom-path
+ items:
+ - key: slack_url
+ path: xpack.notification.slack.account.otheraccount.secure_url
+'''
+ r = helm_template(config)
+ s = r['statefulset'][uname]['spec']['template']['spec']
+ assert s['initContainers'][-1]['volumeMounts'] == [
+ {
+ 'mountPath': '/tmp/keystore',
+ 'name': 'keystore'
+ },
+ {
+ 'mountPath': '/tmp/keystoreSecrets/test',
+ 'name': 'keystore-test'
+ },
+ {
+ 'mountPath': '/tmp/keystoreSecrets/test-with-custom-path',
+ 'name': 'keystore-test-with-custom-path'
+ }
+ ]
+
+def test_keystore_volumes():
+ config = '''
+keystore:
+ - secretName: test
+ - secretName: test-with-custom-path
+ items:
+ - key: slack_url
+ path: xpack.notification.slack.account.otheraccount.secure_url
+'''
+ r = helm_template(config)
+ s = r['statefulset'][uname]['spec']['template']['spec']
+
+ assert {
+ 'name': 'keystore-test',
+ 'secret': {
+ 'secretName': 'test'
+ }
+ } in s['volumes']
+
+ assert {
+ 'name': 'keystore-test-with-custom-path',
+ 'secret': {
+ 'secretName': 'test-with-custom-path',
+ 'items': [{
+ 'key': 'slack_url',
+ 'path': 'xpack.notification.slack.account.otheraccount.secure_url'
+ }]
+ }
+ } in s['volumes']
+>>>>>>> 1ad3826f26b4375b8935956d462f8777bd3c78a4
diff --git a/elasticsearch/values.yaml b/elasticsearch/values.yaml
index f80135f70..f8f454d95 100755
--- a/elasticsearch/values.yaml
+++ b/elasticsearch/values.yaml
@@ -16,7 +16,7 @@ roles:
replicas: 3
minimumMasterNodes: 2
-esMajorVersion: 7
+esMajorVersion: ""
# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
@@ -43,12 +43,15 @@ secretMounts: []
# path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/elasticsearch/elasticsearch"
-imageTag: "7.1.0"
+imageTag: "7.3.0"
imagePullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
+# additional labels
+labels: {}
+
esJavaOpts: "-Xmx1g -Xms1g"
resources:
@@ -67,6 +70,14 @@ initResources: {}
# cpu: "25m"
# memory: "128Mi"
+sidecarResources: {}
+ # limits:
+ # cpu: "25m"
+ # memory: "128Mi"
+ # requests:
+ # cpu: "25m"
+ # memory: "128Mi"
+
networkHost: "0.0.0.0"
volumeClaimTemplate:
@@ -87,16 +98,16 @@ persistence:
enabled: true
annotations: {}
-extraVolumes: []
+extraVolumes: ""
# - name: extras
# emptyDir: {}
-extraVolumeMounts: []
+extraVolumeMounts: ""
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
-extraInitContainers: []
+extraInitContainers: ""
# - name: do-something
# image: busybox
# command: ['do', 'something']
@@ -125,6 +136,11 @@ protocol: http
httpPort: 9200
transportPort: 9300
+service:
+ type: ClusterIP
+ nodePort:
+ annotations: {}
+
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
@@ -132,8 +148,20 @@ updateStrategy: RollingUpdate
# of your pods to be unavailable during maintenance
maxUnavailable: 1
- # GroupID for the elasticsearch user. The official elastic docker images always have the id of 1000
-fsGroup: 1000
+podSecurityContext:
+ fsGroup: 1000
+
+# The following value is deprecated,
+# please use the above podSecurityContext.fsGroup instead
+fsGroup: ""
+
+securityContext:
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120
@@ -150,6 +178,11 @@ readinessProbe:
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
+## Use an alternate scheduler.
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+
imagePullSecrets: []
nodeSelector: {}
tolerations: []
@@ -171,3 +204,19 @@ ingress:
nameOverride: ""
fullnameOverride: ""
+
+# https://github.com/elastic/helm-charts/issues/63
+masterTerminationFix: false
+
+lifecycle: {}
+ # preStop:
+ # exec:
+ # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+ # postStart:
+ # exec:
+ # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+
+sysctlInitContainer:
+ enabled: true
+
+keystore: []
diff --git a/filebeat/Chart.yaml b/filebeat/Chart.yaml
index 7596ab7f8..8a653fac8 100755
--- a/filebeat/Chart.yaml
+++ b/filebeat/Chart.yaml
@@ -1,11 +1,11 @@
-description: Filebeat
+description: Official Elastic helm chart for Filebeat
home: https://github.com/elastic/helm-charts
maintainers:
- email: helm-charts@elastic.co
name: Elastic
name: filebeat
-version: 7.1.0
-appVersion: 7.1.0
+version: 7.3.0
+appVersion: 7.3.0
sources:
- https://github.com/elastic/beats
icon: https://helm.elastic.co/icons/filebeat.png
diff --git a/filebeat/README.md b/filebeat/README.md
index d63fa493d..8ab66d260 100644
--- a/filebeat/README.md
+++ b/filebeat/README.md
@@ -1,6 +1,6 @@
# Filebeat Helm Chart
-This functionality is in beta status and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but beta features are not subject to the support SLA of official GA features.
+This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
This helm chart is a lightweight way to configure and run our official [Filebeat docker image](https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html).
@@ -21,23 +21,23 @@ This helm chart is a lightweight way to configure and run our official [Filebeat
```
* Install it
```
- helm install --name filebeat elastic/filebeat --version 7.1.0
+ helm install --name filebeat elastic/filebeat
```
## Compatibility
This chart is tested with the latest supported versions. The currently tested versions are:
-| 5.x | 6.x | 7.x |
-| ------ | ----- | ----- |
-| 5.6.16 | 6.8.0 | 7.1.0 |
+| 6.x | 7.x |
+| ----- | ----- |
+| 6.8.1 | 7.3.0 |
Examples of installing older major versions can be found in the [examples](./examples) directory.
-While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.1.0` of Filebeat it would look like this:
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.3.0` of Filebeat it would look like this:
```
-helm install --name filebeat elastic/filebeat --version 7.1.0 --set imageTag=7.1.0
+helm install --name filebeat elastic/filebeat --set imageTag=7.3.0
```
@@ -46,16 +46,17 @@ helm install --name filebeat elastic/filebeat --version 7.1.0 --set imageTag=7.1
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `filebeatConfig` | Allows you to add any config files in `/usr/share/filebeat` such as `filebeat.yml`. See [values.yaml](./values.yaml) for an example of the formatting with the default configuration. | see [values.yaml](./values.yaml) |
| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
-| `extraVolumeMounts` | Any extra volumes mounts to define for the Filebeat container | `[]` |
-| `extraVolumes` | Any extra volumes to define for the pod | `[]` |
+| `extraVolumeMounts` | Templatable string of additional volumeMounts to be passed to the `tpl` function | `""` |
+| `extraVolumes` | Templatable string of additional volumes to be passed to the `tpl` function | `""` |
| `hostPathRoot` | Fully-qualified [hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) that will be used to persist Filebeat registry data | `/var/lib` |
| `image` | The Filebeat docker image | `docker.elastic.co/beats/filebeat` |
-| `imageTag` | The Filebeat docker image tag | `7.1.0` |
+| `imageTag` | The Filebeat docker image tag | `7.3.0` |
| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` |
| `managedServiceAccount` | Whether the `serviceAccount` should be managed by this helm chart. Set this to `false` in order to manage your own service account and related roles. | `true` |
| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Filebeat pods | `{}` |
-| `podSecurityContext` | Configurable [podSecurityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Filebeat pod execution environment | `runAsUser: 0`
`privileged: false` |
+| `labels` | Configurable [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) applied to all Filebeat pods | `{}` |
+| `podSecurityContext` | Configurable [podSecurityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Filebeat pod execution environment | `runAsUser: 0`
`privileged: false` |
| `livenessProbe` | Parameters to pass to [liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) checks for values such as timeouts and thresholds. | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
| `readinessProbe` | Parameters to pass to [readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) checks for values such as timeouts and thresholds. | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the `DaemonSet` | `requests.cpu: 100m`
`requests.memory: 100Mi`
`limits.cpu: 1000m`
`limits.memory: 200Mi` |
@@ -63,6 +64,9 @@ helm install --name filebeat elastic/filebeat --version 7.1.0 --set imageTag=7.1
| `secretMounts` | Allows you easily mount a secret as a file inside the `DaemonSet`. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
| `terminationGracePeriod` | Termination period (in seconds) to wait before killing Filebeat pod process on pod shutdown | `30` |
| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
+| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) | `{}` |
+| `affinity` | Configurable [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) | `{}` |
+| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `""` |
| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy) for the `DaemonSet`. By default Kubernetes will kill and recreate pods on updates. Setting this to `OnDelete` will require that pods be deleted manually. | `RollingUpdate` |
## Examples
diff --git a/filebeat/examples/6.x/test/goss.yaml b/filebeat/examples/6.x/test/goss.yaml
index 045673ce1..e18f4e81c 100644
--- a/filebeat/examples/6.x/test/goss.yaml
+++ b/filebeat/examples/6.x/test/goss.yaml
@@ -18,4 +18,4 @@ http:
status: 200
timeout: 2000
body:
- - 'filebeat-6.8.0'
+ - 'filebeat-6.8.1'
diff --git a/filebeat/examples/6.x/values.yaml b/filebeat/examples/6.x/values.yaml
index 6daa49eef..d0eeea620 100644
--- a/filebeat/examples/6.x/values.yaml
+++ b/filebeat/examples/6.x/values.yaml
@@ -1,4 +1,4 @@
-imageTag: 6.8.0
+imageTag: 6.8.1
extraEnvs:
- name: ELASTICSEARCH_HOSTS
diff --git a/filebeat/examples/default/test/goss.yaml b/filebeat/examples/default/test/goss.yaml
index dcd9017bb..2de3a9fb0 100644
--- a/filebeat/examples/default/test/goss.yaml
+++ b/filebeat/examples/default/test/goss.yaml
@@ -29,7 +29,7 @@ http:
status: 200
timeout: 2000
body:
- - 'filebeat-7.1.0'
+ - 'filebeat-7.3.0'
file:
/usr/share/filebeat/filebeat.yml:
@@ -44,4 +44,4 @@ command:
exit-status: 0
stdout:
- 'elasticsearch: http://elasticsearch-master:9200'
- - 'version: 7.1.0'
+ - 'version: 7.3.0'
diff --git a/filebeat/examples/oss/test/goss.yaml b/filebeat/examples/oss/test/goss.yaml
index fc43e5d28..03b8907a9 100644
--- a/filebeat/examples/oss/test/goss.yaml
+++ b/filebeat/examples/oss/test/goss.yaml
@@ -19,4 +19,4 @@ http:
status: 200
timeout: 2000
body:
- - 'filebeat-7.1.0'
+ - 'filebeat-7.3.0'
diff --git a/filebeat/examples/security/test/goss.yaml b/filebeat/examples/security/test/goss.yaml
index b41c847d4..2df9e37ee 100644
--- a/filebeat/examples/security/test/goss.yaml
+++ b/filebeat/examples/security/test/goss.yaml
@@ -3,7 +3,7 @@ http:
status: 200
timeout: 2000
body:
- - 'filebeat-7.1.0'
+ - 'filebeat-7.3.0'
allow-insecure: true
username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
diff --git a/filebeat/templates/daemonset.yaml b/filebeat/templates/daemonset.yaml
index bc53b51ba..ff0b9bc8b 100644
--- a/filebeat/templates/daemonset.yaml
+++ b/filebeat/templates/daemonset.yaml
@@ -8,6 +8,9 @@ metadata:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
+ {{- range $key, $value := .Values.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
spec:
selector:
matchLabels:
@@ -33,8 +36,16 @@ spec:
release: {{ .Release.Name | quote }}
spec:
{{- with .Values.tolerations }}
- tolerations:
-{{ toYaml . | indent 6 }}
+ tolerations: {{ toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector: {{ toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity: {{ toYaml . | nindent 8 -}}
{{- end }}
serviceAccountName: {{ template "serviceAccount" . }}
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
@@ -42,7 +53,7 @@ spec:
{{- range .Values.secretMounts }}
- name: {{ .name }}
secret:
- secretName: {{ .name }}
+ secretName: {{ .secretName }}
{{- end }}
{{- if .Values.filebeatConfig }}
- name: filebeat-config
diff --git a/filebeat/tests/filebeat_test.py b/filebeat/tests/filebeat_test.py
index da73f54e7..5bd288921 100644
--- a/filebeat/tests/filebeat_test.py
+++ b/filebeat/tests/filebeat_test.py
@@ -137,7 +137,7 @@ def test_adding_in_filebeat_config():
d = r['daemonset'][name]['spec']['template']['spec']
- assert {'configMap': {'name': name + '-config', 'defaultMode': 0600}, 'name': project + '-config'} in d['volumes']
+ assert {'configMap': {'name': name + '-config', 'defaultMode': 0o600}, 'name': project + '-config'} in d['volumes']
assert {'mountPath': '/usr/share/filebeat/filebeat.yml', 'name': project + '-config', 'subPath': 'filebeat.yml', 'readOnly': True} in d['containers'][0]['volumeMounts']
assert {'mountPath': '/usr/share/filebeat/other-config.yml', 'name': project + '-config', 'subPath': 'other-config.yml', 'readOnly': True} in d['containers'][0]['volumeMounts']
@@ -148,7 +148,7 @@ def test_adding_a_secret_mount():
config = '''
secretMounts:
- name: elastic-certificates
- secretName: elastic-certificates
+ secretName: elastic-certs
path: /usr/share/filebeat/config/certs
'''
r = helm_template(config)
@@ -160,7 +160,7 @@ def test_adding_a_secret_mount():
assert s['volumes'][0] == {
'name': 'elastic-certificates',
'secret': {
- 'secretName': 'elastic-certificates'
+ 'secretName': 'elastic-certs'
}
}
@@ -180,3 +180,54 @@ def test_adding_a_extra_volume_with_volume_mount():
assert {'name': 'extras', 'emptyDir': {}} in extraVolume
extraVolumeMounts = r['daemonset'][name]['spec']['template']['spec']['containers'][0]['volumeMounts']
assert {'name': 'extras', 'mountPath': '/usr/share/extras', 'readOnly': True} in extraVolumeMounts
+
+def test_adding_pod_labels():
+ config = '''
+labels:
+ app.kubernetes.io/name: filebeat
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['metadata']['labels']['app.kubernetes.io/name'] == 'filebeat'
+
+
+def test_adding_a_node_selector():
+ config = '''
+nodeSelector:
+ disktype: ssd
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['nodeSelector']['disktype'] == 'ssd'
+
+
+def test_adding_an_affinity_rule():
+ config = '''
+affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - filebeat
+ topologyKey: kubernetes.io/hostname
+'''
+
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['affinity']['podAntiAffinity'][
+ 'requiredDuringSchedulingIgnoredDuringExecution'][0]['topologyKey'] == 'kubernetes.io/hostname'
+
+def test_priority_class_name():
+ config = '''
+priorityClassName: ""
+'''
+ r = helm_template(config)
+ spec = r['daemonset'][name]['spec']['template']['spec']
+ assert 'priorityClassName' not in spec
+
+ config = '''
+priorityClassName: "highest"
+'''
+ r = helm_template(config)
+ priority_class_name = r['daemonset'][name]['spec']['template']['spec']['priorityClassName']
+ assert priority_class_name == "highest"
diff --git a/filebeat/values.yaml b/filebeat/values.yaml
index 76c79aebc..6f15685eb 100755
--- a/filebeat/values.yaml
+++ b/filebeat/values.yaml
@@ -21,12 +21,12 @@ extraEnvs: []
# - name: MY_ENVIRONMENT_VAR
# value: the_value_goes_here
-extraVolumeMounts: []
+extraVolumeMounts: ""
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
-extraVolumes: []
+extraVolumes: ""
# - name: extras
# emptyDir: {}
@@ -34,7 +34,7 @@ extraVolumes: []
hostPathRoot: /var/lib
image: "docker.elastic.co/beats/filebeat"
-imageTag: "7.1.0"
+imageTag: "7.3.0"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []
@@ -53,6 +53,9 @@ readinessProbe:
# Whether this chart should self-manage its service account, role, and associated role binding.
managedServiceAccount: true
+# additional labels
+labels: {}
+
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
@@ -87,6 +90,14 @@ terminationGracePeriod: 30
tolerations: []
+nodeSelector: {}
+
+affinity: {}
+
+# This is the PriorityClass settings as defined in
+# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
+priorityClassName: ""
+
updateStrategy: RollingUpdate
# Override various naming aspects of this chart
diff --git a/helpers/bumper.py b/helpers/bumper.py
index f79341700..f97434c0a 100755
--- a/helpers/bumper.py
+++ b/helpers/bumper.py
@@ -7,15 +7,17 @@
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
+chart_version = '7.3.0'
+
versions = {
- 5: '5.6.16',
- 6: '6.8.0',
- 7: '7.1.0',
+ 6: '6.8.1',
+ 7: '7.3.0',
}
file_patterns = [
- '*/examples/*/test/goss.y*ml',
+ '*/examples/*/test/goss*.y*ml',
'*/examples/*/*.y*ml',
+ 'helpers/examples.mk',
'*/README.md',
'*/values.y*ml',
'*/Chart.y*ml',
@@ -26,7 +28,7 @@
blacklist = re.compile(r".*127.0.0.1.*")
for major, version in versions.iteritems():
- r = re.compile(r"{0}\.[0-9]*\.[0-9]*".format(major))
+ r = re.compile(r"{0}\.[0-9]*\.[0-9]*-?[0-9]?".format(major))
for pattern in file_patterns:
for f in glob.glob(pattern):
print(f)
@@ -34,4 +36,7 @@
if re.match(blacklist, line):
print(line.rstrip())
else:
- print(r.sub(version, line.rstrip()))
+ if f.endswith('Chart.yaml') and line.startswith('version:'):
+ print(r.sub(chart_version, line.rstrip()))
+ else:
+ print(r.sub(version, line.rstrip()))
diff --git a/helpers/common.mk b/helpers/common.mk
index bd2c71881..3afe2bfd1 100644
--- a/helpers/common.mk
+++ b/helpers/common.mk
@@ -3,6 +3,7 @@ default: test
.ONESHELL:
lint:
+ grep 'charts/' ./.helmignore || echo 'charts/' >> ./.helmignore
helm lint --strict ./
template:
@@ -10,12 +11,16 @@ template:
build:
cd ../helpers/helm-tester && \
- docker build -t helm-tester .
+ for i in {1..5}; do docker build -t helm-tester . && break || sleep 15; done
pytest:
pytest -sv --color=yes
-test-all: template lint pytest
+deps:
+ sed --in-place '/charts\//d' ./.helmignore
+ helm dependency update
+
+test-all: lint deps template pytest
test: build
docker run --rm -i --user "$$(id -u):$$(id -g)" -v $$(pwd)/../:/app -w /app/$$(basename $$(pwd)) helm-tester make test-all
diff --git a/helpers/examples.mk b/helpers/examples.mk
index 8319c1014..c874cd45f 100644
--- a/helpers/examples.mk
+++ b/helpers/examples.mk
@@ -1,8 +1,11 @@
GOSS_VERSION := v0.3.6
+GOSS_FILE ?= goss.yaml
+GOSS_SELECTOR ?= release=$(RELEASE)
+STACK_VERSION := 7.3.0
goss:
- GOSS_CONTAINER=$$(kubectl get pods -l release=$(RELEASE) -o name | awk -F'/' 'NR==1{ print $$NF }') && \
+ GOSS_CONTAINER=$$(kubectl get --no-headers=true pods -l $(GOSS_SELECTOR) -o custom-columns=:metadata.name | sed -n 1p ) && \
echo Testing with pod: $$GOSS_CONTAINER && \
- kubectl cp test/*.yaml $$GOSS_CONTAINER:/tmp/goss.yaml && \
- kubectl exec $$GOSS_CONTAINER -- sh -c "cd /tmp/ && curl -s -L https://github.com/aelsabbahy/goss/releases/download/$(GOSS_VERSION)/goss-linux-amd64 -o goss && chmod +rx ./goss && ./goss validate --retry-timeout 30s --sleep 5s --color --format documentation"
+ kubectl cp test/$(GOSS_FILE) $$GOSS_CONTAINER:/tmp/$(GOSS_FILE) && \
+ kubectl exec $$GOSS_CONTAINER -- sh -c "cd /tmp/ && curl -s -L https://github.com/aelsabbahy/goss/releases/download/$(GOSS_VERSION)/goss-linux-amd64 -o goss && chmod +rx ./goss && ./goss --gossfile $(GOSS_FILE) validate --retry-timeout 300s --sleep 5s --color --format documentation"
diff --git a/helpers/helm-tester/Dockerfile b/helpers/helm-tester/Dockerfile
index b197fcc39..8843743ee 100644
--- a/helpers/helm-tester/Dockerfile
+++ b/helpers/helm-tester/Dockerfile
@@ -5,4 +5,6 @@ ENV HELM_VERSION=2.14.0
RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar xfv helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv linux-amd64/helm /usr/local/bin/ && \
- rm -rf linux-amd64
+ rm -rf linux-amd64 && \
+ HOME=/ helm init --client-only && \
+ chmod 777 -R /.helm
diff --git a/helpers/matrix.yml b/helpers/matrix.yml
index 2e43ca6ad..221b35e47 100644
--- a/helpers/matrix.yml
+++ b/helpers/matrix.yml
@@ -2,25 +2,31 @@ CHART:
- elasticsearch
- kibana
- filebeat
+ - metricbeat
ES_SUITE:
- default
+ - config
- multi
- oss
- security
- upgrade
- - 5.x
- 6.x
KIBANA_SUITE:
- default
- oss
- security
- - 5.x
- 6.x
FILEBEAT_SUITE:
- default
- oss
- security
- 6.x
+METRICBEAT_SUITE:
+ - default
+ - oss
+ - security
+ - 6.x
KUBERNETES_VERSION:
- '1.11'
- '1.12'
+ - '1.13'
diff --git a/helpers/release.md b/helpers/release.md
index 2d99ffc2e..590323a82 100644
--- a/helpers/release.md
+++ b/helpers/release.md
@@ -1,7 +1,7 @@
# Release process
* Update the [changelog](/CHANGELOG.md)
-* Update the stack versions in [bumper.py](/helpers/bumper.py) and run the script. This will update the versions in all the right places
+* Update the stack and chart versions in [bumper.py](/helpers/bumper.py) and run the script. This will update the versions in all the right places
* Open a pull request and wait for a green build before merging
* Create a [new release](https://github.com/elastic/helm-charts/releases/new) and include the latest changelog entry
* Run the [release script](/helpers/release.py) to build and upload the artifact
diff --git a/helpers/release.py b/helpers/release.py
index 264b23ddc..145995ff1 100644
--- a/helpers/release.py
+++ b/helpers/release.py
@@ -5,6 +5,11 @@
import subprocess
import yaml
+try:
+ raw_input
+except NameError: # Python 3
+ raw_input = input
+
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
bucket = 'gs://' + os.environ['GCS_BUCKET']
@@ -25,6 +30,9 @@ def run(cmd):
for filepath in glob.iglob('*/Chart.yaml'):
chart = os.path.split(os.path.dirname(filepath))[-1]
+ # Download dependencies
+ run(['helm', 'dependency', 'update', chart])
+
# Package up the chart
run(['helm', 'package', chart, '--destination', chart])
diff --git a/helpers/terraform/Dockerfile b/helpers/terraform/Dockerfile
index be56f5e95..758f0bb8c 100644
--- a/helpers/terraform/Dockerfile
+++ b/helpers/terraform/Dockerfile
@@ -4,9 +4,11 @@ ENV VAULT_VERSION 0.9.3
ENV TERRAFORM_VERSION=0.11.7
ENV KUBECTL_VERSION=1.14.1
ENV HELM_VERSION=2.14.0
+ENV DOCKER_VERSION=18.09.7
RUN yum -y install \
make \
+ openssl \
unzip \
which
@@ -16,23 +18,29 @@ RUN yum -y install \
gcloud config set component_manager/disable_update_check true && \
gcloud version
-RUN curl --retry 5 -O https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip && \
+RUN curl -O https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip && \
unzip vault_${VAULT_VERSION}_linux_amd64.zip -d /usr/local/bin/ && \
chmod +x /usr/local/bin/vault && \
vault version
-RUN curl --retry 5 -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \
+RUN curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \
unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/bin/ && \
rm -f terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \
terraform version
-RUN curl --retry 5 -O https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \
+RUN curl -O https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \
mv kubectl /usr/local/bin/ && \
chmod a+x /usr/local/bin/kubectl && \
kubectl version --client
-RUN curl --retry 5 -O https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
+RUN curl -O https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
tar xfv helm-v${HELM_VERSION}-linux-amd64.tar.gz && \
mv linux-amd64/helm /usr/local/bin/ && \
rm -rf linux-amd64 && \
helm version --client
+
+RUN curl -O https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz && \
+ tar xfv docker* && \
+ mv docker/docker /usr/local/bin && \
+ rm -rf docker/ && \
+ docker
diff --git a/helpers/terraform/Makefile b/helpers/terraform/Makefile
index b460db75e..583d2f58c 100644
--- a/helpers/terraform/Makefile
+++ b/helpers/terraform/Makefile
@@ -62,8 +62,11 @@ up: k8s
for i in 1 2 3 4 5; do helm init --wait --upgrade && break || sleep 5; done
integration: creds
- cd ../../$(CHART)/examples/$(SUITE) && \
+ cd ../../$(CHART)/ && \
+ helm init --client-only && \
+ helm dependency update && \
+ cd ./examples/$(SUITE) && \
make
build:
- docker build -t helm-charts .
+ for i in 1 2 3 4 5; do docker build -t helm-charts . && break || sleep 5; done
diff --git a/helpers/terraform/in-docker b/helpers/terraform/in-docker
index 0cce963f4..7b2f6cf1a 100755
--- a/helpers/terraform/in-docker
+++ b/helpers/terraform/in-docker
@@ -11,5 +11,6 @@ docker run --rm --interactive \
--env HOME=/app \
--volume "${PWD}/../../:/app" \
--user "$(id -u):$(id -g)" \
+ -v /var/run/docker.sock:/var/run/docker.sock \
--workdir "/app/helpers/terraform/" \
"helm-charts" "$@"
diff --git a/helpers/terraform/main.tf b/helpers/terraform/main.tf
index 635d9f436..292c881fa 100644
--- a/helpers/terraform/main.tf
+++ b/helpers/terraform/main.tf
@@ -1,7 +1,7 @@
provider "google" {
project = "${var.project}"
region = "${var.primary_region}"
- version = "1.13.0"
+ version = "2.13.0"
}
terraform {
@@ -25,8 +25,8 @@ resource "google_container_cluster" "cluster" {
}
timeouts {
- create = "30m"
- delete = "30m"
- update = "30m"
+ create = "180m"
+ delete = "180m"
+ update = "180m"
}
}
diff --git a/kibana/Chart.yaml b/kibana/Chart.yaml
index ace954179..c16bd2c89 100755
--- a/kibana/Chart.yaml
+++ b/kibana/Chart.yaml
@@ -1,11 +1,11 @@
-description: Kibana
+description: Official Elastic helm chart for Kibana
home: https://github.com/elastic/helm-charts
maintainers:
- email: helm-charts@elastic.co
name: Elastic
name: kibana
-version: 7.1.0
-appVersion: 7.1.0
+version: 7.3.0
+appVersion: 7.3.0
sources:
- https://github.com/elastic/kibana
icon: https://helm.elastic.co/icons/kibana.png
diff --git a/kibana/README.md b/kibana/README.md
index 0e14afb1a..2fe63fd35 100644
--- a/kibana/README.md
+++ b/kibana/README.md
@@ -1,6 +1,6 @@
# Kibana Helm Chart
-This functionality is in beta status and may be changed or removed completely in a future release. Elastic will take a best effort approach to fix any issues, but beta features are not subject to the support SLA of official GA features.
+This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
This helm chart is a lightweight way to configure and run our official [Kibana docker image](https://www.elastic.co/guide/en/kibana/current/docker.html)
@@ -17,23 +17,23 @@ This helm chart is a lightweight way to configure and run our official [Kibana d
```
* Install it
```
- helm install --name kibana elastic/kibana --version 7.1.0
+ helm install --name kibana elastic/kibana
```
## Compatibility
This chart is tested with the latest supported versions. The currently tested versions are:
-| 5.x | 6.x | 7.x |
-| ------ | ----- | ----- |
-| 5.6.16 | 6.8.0 | 7.1.0 |
+| 6.x | 7.x |
+| ----- | ----- |
+| 6.8.1 | 7.3.0 |
Examples of installing older major versions can be found in the [examples](./examples) directory.
-While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.1.0` of Kibana it would look like this:
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.3.0` of Kibana it would look like this:
```
-helm install --name kibana elastic/kibana --version 7.1.0 --set imageTag=7.1.0
+helm install --name kibana elastic/kibana --set imageTag=7.3.0
```
## Configuration
@@ -46,15 +46,18 @@ helm install --name kibana elastic/kibana --version 7.1.0 --set imageTag=7.1.0
| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
| `secretMounts` | Allows you easily mount a secret as a file inside the deployment. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
| `image` | The Kibana docker image | `docker.elastic.co/kibana/kibana` |
-| `imageTag` | The Kibana docker image tag | `7.1.0` |
+| `imageTag` | The Kibana docker image tag | `7.3.0` |
| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
+| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Kibana pods | `{}` |
| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the statefulset | `requests.cpu: 100m`
`requests.memory: 2Gi`
`limits.cpu: 1000m`
`limits.memory: 2Gi` |
| `protocol` | The protocol that will be used for the readinessProbe. Change this to `https` if you have `server.ssl.enabled: true` set | `http` |
-| `healthCheckPath` | The path used for the readinessProbe to check that Kibana is ready | `/app/kibana` |
+| `serverHost` | The [`server.host`](https://www.elastic.co/guide/en/kibana/current/settings.html) Kibana setting. This is set explicitly so that the default always matches what comes with the docker image. | `0.0.0.0` |
+| `healthCheckPath` | The path used for the readinessProbe to check that Kibana is ready. If you are setting `server.basePath` you will also need to update this to `/${basePath}/app/kibana` | `/app/kibana` |
| `kibanaConfig` | Allows you to add any config files in `/usr/share/kibana/config/` such as `kibana.yml`. See [values.yaml](./values.yaml) for an example of the formatting. | `{}` |
-| `podSecurityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) for the pod | `{}` |
+| `podSecurityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) for the pod | `fsGroup: 1000` |
+| `securityContext` | Allows you to set the [securityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) for the container | `capabilities.drop:[ALL]`
`runAsNonRoot: true`
`runAsUser: 1000` |
| `serviceAccount` | Allows you to overwrite the "default" [serviceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) for the pod | `[]` |
-| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `` |
+| `priorityClassName` | The [name of the PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass). No default is supplied as the PriorityClass must be created first. | `""` |
| `antiAffinityTopologyKey` | The [anti-affinity topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). By default this will prevent multiple Kibana instances from running on the same Kubernetes node | `kubernetes.io/hostname` |
| `antiAffinity` | Setting this to hard enforces the [anti-affinity rules](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). If it is set to soft it will be done "best effort" | `hard` |
| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. | `5601` |
@@ -65,6 +68,8 @@ helm install --name kibana elastic/kibana --version 7.1.0 --set imageTag=7.1.0
| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) so that you can target specific nodes for your Kibana instances | `{}` |
| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
| `ingress` | Configurable [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to expose the Kibana service. See [`values.yaml`](./values.yaml) for an example | `enabled: false` |
+| `service` | Configurable [service](https://kubernetes.io/docs/concepts/services-networking/service/) to expose the Kibana service. See [`values.yaml`](./values.yaml) for an example | `type: ClusterIP`
`port: 5601`
`nodePort:`
`annotations: {}` |
+| `labels` | Configurable [label](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) applied to all Kibana pods | `{}` |
## Examples
@@ -91,11 +96,20 @@ In [examples/](./examples) you will find some example configurations. These exam
cd examples/security
make
```
-* You can now setup a port forward and access Kibana at http://localhost:5601 with the credentials `elastic:changeme`
- ```
- kubectl port-forward deployment/helm-kibana-default-kibana 5601
+* Setup a port forward and access Kibana at https://localhost:5601
```
+ # Setup the port forward
+ kubectl port-forward deployment/helm-kibana-security-kibana 5601
+ # Run this in a separate terminal
+ # Get the auto generated password
+ password=$(kubectl get secret elastic-credentials -o jsonpath='{.data.password}' | base64 --decode)
+ echo $password
+
+ # Test Kibana is working with curl or access it with your browser at https://localhost:5601
+ # The example certificate is self signed so you may see a warning about the certificate
+ curl -I -k -u elastic:$password https://localhost:5601/app/kibana
+ ```
## Testing
diff --git a/kibana/examples/5.x/Makefile b/kibana/examples/5.x/Makefile
deleted file mode 100644
index d7ab7579c..000000000
--- a/kibana/examples/5.x/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-default: test
-include ../../../helpers/examples.mk
-
-RELEASE := helm-kibana-fivex
-
-install:
- helm upgrade --wait --timeout=600 --install --values ./values.yml $(RELEASE) ../../ ; \
-
-purge:
- helm del --purge $(RELEASE)
-
-test: install goss
diff --git a/kibana/examples/5.x/test/goss.yaml b/kibana/examples/5.x/test/goss.yaml
deleted file mode 100644
index 0756b9f4a..000000000
--- a/kibana/examples/5.x/test/goss.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-http:
- http://localhost:5601/api/status:
- status: 200
- timeout: 2000
- body:
- - '"version":"5.6.16"'
- username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
- password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
-
- http://localhost:5601/app/kibana:
- status: 200
- timeout: 2000
- username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
- password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
diff --git a/kibana/examples/5.x/values.yml b/kibana/examples/5.x/values.yml
deleted file mode 100644
index 226dd60f6..000000000
--- a/kibana/examples/5.x/values.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-imageTag: 5.6.16
-elasticsearchURL: "http://fivex-master:9200"
-
-extraEnvs:
- - name: 'ELASTICSEARCH_USERNAME'
- valueFrom:
- secretKeyRef:
- name: elastic-fivex-credentials
- key: username
- - name: 'ELASTICSEARCH_PASSWORD'
- valueFrom:
- secretKeyRef:
- name: elastic-fivex-credentials
- key: password
diff --git a/kibana/examples/6.x/test/goss.yaml b/kibana/examples/6.x/test/goss.yaml
index 2cd3ab0ce..b2a3e5acc 100644
--- a/kibana/examples/6.x/test/goss.yaml
+++ b/kibana/examples/6.x/test/goss.yaml
@@ -3,7 +3,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number":"6.8.0"'
+ - '"number":"6.8.1"'
http://localhost:5601/app/kibana:
status: 200
diff --git a/kibana/examples/6.x/values.yml b/kibana/examples/6.x/values.yml
index 92edf70fa..0f05a541e 100644
--- a/kibana/examples/6.x/values.yml
+++ b/kibana/examples/6.x/values.yml
@@ -1,4 +1,4 @@
---
-imageTag: 6.8.0
+imageTag: 6.8.1
elasticsearchHosts: "http://six-master:9200"
diff --git a/kibana/examples/default/test/goss.yaml b/kibana/examples/default/test/goss.yaml
index 33561424e..3691f18c7 100644
--- a/kibana/examples/default/test/goss.yaml
+++ b/kibana/examples/default/test/goss.yaml
@@ -3,7 +3,7 @@ http:
status: 200
timeout: 2000
body:
- - '"number":"7.1.0"'
+ - '"number":"7.3.0"'
http://localhost:5601/app/kibana:
status: 200
diff --git a/kibana/examples/security/Makefile b/kibana/examples/security/Makefile
index 0411eeb29..a9af51499 100644
--- a/kibana/examples/security/Makefile
+++ b/kibana/examples/security/Makefile
@@ -3,18 +3,15 @@ include ../../../helpers/examples.mk
RELEASE := helm-kibana-security
-install: secrets
+install:
helm upgrade --wait --timeout=600 --install --values ./security.yml $(RELEASE) ../../ ; \
-test: install goss
+test: secrets install goss
purge:
+ kubectl delete secret kibana || true
helm del --purge $(RELEASE)
secrets:
- kubectl delete secret kibana-certificates || true
- vault read -field=kibana.crt secret/devops-ci/helm-charts/kibana/security/certificates | base64 --decode > kibana.crt
- vault read -field=kibana.key secret/devops-ci/helm-charts/kibana/security/certificates | base64 --decode > kibana.key
- kubectl create secret generic kibana-certificates --from-file=kibana.crt --from-file=kibana.key && \
- rm -f kibana.crt kibana.key
-
+ encryptionkey=$$(echo $$(docker run --rm docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) /bin/sh -c "< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c50")) && \
+ kubectl create secret generic kibana --from-literal=encryptionkey=$$encryptionkey
diff --git a/kibana/examples/security/security.yml b/kibana/examples/security/security.yml
index 3517ed8e3..dfc9d5d13 100644
--- a/kibana/examples/security/security.yml
+++ b/kibana/examples/security/security.yml
@@ -13,14 +13,19 @@ extraEnvs:
secretKeyRef:
name: elastic-credentials
key: password
+ - name: 'KIBANA_ENCRYPTION_KEY'
+ valueFrom:
+ secretKeyRef:
+ name: kibana
+ key: encryptionkey
kibanaConfig:
kibana.yml: |
server.ssl:
enabled: true
- key: /usr/share/kibana/config/certs/kibana/kibana.key
- certificate: /usr/share/kibana/config/certs/kibana/kibana.crt
- xpack.security.encryptionKey: something_at_least_32_characters
+ key: /usr/share/kibana/config/certs/elastic-certificate.pem
+ certificate: /usr/share/kibana/config/certs/elastic-certificate.pem
+ xpack.security.encryptionKey: ${KIBANA_ENCRYPTION_KEY}
elasticsearch.ssl:
certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem
verificationMode: certificate
@@ -31,6 +36,3 @@ secretMounts:
- name: elastic-certificate-pem
secretName: elastic-certificate-pem
path: /usr/share/kibana/config/certs
- - name: kibana-certificates
- secretName: kibana-certificates
- path: /usr/share/kibana/config/certs/kibana
diff --git a/kibana/examples/security/test/goss.yaml b/kibana/examples/security/test/goss.yaml
index 51b3ce214..39e9a4852 100644
--- a/kibana/examples/security/test/goss.yaml
+++ b/kibana/examples/security/test/goss.yaml
@@ -6,15 +6,22 @@ http:
username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+ https://helm-kibana-security-kibana:5601/app/kibana:
+ status: 200
+ timeout: 2000
+ allow-insecure: true
+ username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
+ password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+
file:
/usr/share/kibana/config/kibana.yml:
exists: true
contains:
- 'server.ssl:'
- ' enabled: true'
- - ' key: /usr/share/kibana/config/certs/kibana/kibana.key'
- - ' certificate: /usr/share/kibana/config/certs/kibana/kibana.crt'
- - 'xpack.security.encryptionKey: something_at_least_32_characters'
+ - ' key: /usr/share/kibana/config/certs/elastic-certificate.pem'
+ - ' certificate: /usr/share/kibana/config/certs/elastic-certificate.pem'
+ - 'xpack.security.encryptionKey:'
- 'elasticsearch.ssl:'
- ' certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem'
- ' verificationMode: certificate'
diff --git a/kibana/templates/deployment.yaml b/kibana/templates/deployment.yaml
index 52a473d67..a0310b959 100644
--- a/kibana/templates/deployment.yaml
+++ b/kibana/templates/deployment.yaml
@@ -5,6 +5,9 @@ metadata:
labels:
app: {{ .Chart.Name }}
release: {{ .Release.Name | quote }}
+ {{- range $key, $value := .Values.labels }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
spec:
replicas: {{ .Values.replicas }}
strategy:
@@ -19,6 +22,9 @@ spec:
app: kibana
release: {{ .Release.Name | quote }}
annotations:
+ {{- range $key, $value := .Values.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
{{/* This forces a restart if the configmap has changed */}}
{{- if .Values.kibanaConfig }}
configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
@@ -27,10 +33,8 @@ spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
-{{- if .Values.podSecurityContext }}
securityContext:
{{ toYaml .Values.podSecurityContext | indent 8 }}
-{{- end }}
{{- if .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
{{- end }}
@@ -38,7 +42,7 @@ spec:
{{- range .Values.secretMounts }}
- name: {{ .name }}
secret:
- secretName: {{ .name }}
+ secretName: {{ .secretName }}
{{- end }}
{{- if .Values.kibanaConfig }}
- name: kibanaconfig
@@ -63,7 +67,10 @@ spec:
{{- end }}
containers:
- name: kibana
+ securityContext:
+{{ toYaml .Values.securityContext | indent 10 }}
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
+ imagePullPolicy: "{{ .Values.imagePullPolicy }}"
env:
{{- if .Values.elasticsearchURL }}
- name: ELASTICSEARCH_URL
@@ -72,13 +79,15 @@ spec:
- name: ELASTICSEARCH_HOSTS
value: "{{ .Values.elasticsearchHosts }}"
{{- end }}
+ - name: SERVER_HOST
+ value: "{{ .Values.serverHost }}"
{{- if .Values.extraEnvs }}
{{ toYaml .Values.extraEnvs | indent 10 }}
{{- end }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 10 }}
exec:
- command:
+ command:
- sh
- -c
- |
@@ -93,7 +102,7 @@ spec:
curl -k "$@" "{{ .Values.protocol }}://localhost:{{ .Values.httpPort }}${path}"
}
-
+
http "{{ .Values.healthCheckPath }}"
ports:
- containerPort: {{ .Values.httpPort }}
@@ -103,6 +112,9 @@ spec:
{{- range .Values.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
+ {{- if .subPath }}
+ subPath: {{ .subPath }}
+ {{- end }}
{{- end }}
{{- range $path, $config := .Values.kibanaConfig }}
- name: kibanaconfig
diff --git a/kibana/templates/ingress.yaml b/kibana/templates/ingress.yaml
index d4e7707c6..cd4914930 100644
--- a/kibana/templates/ingress.yaml
+++ b/kibana/templates/ingress.yaml
@@ -17,13 +17,7 @@ metadata:
spec:
{{- if .Values.ingress.tls }}
tls:
- {{- range .Values.ingress.tls }}
- - hosts:
- {{- range .hosts }}
- - {{ . }}
- {{- end }}
- secretName: {{ .secretName }}
- {{- end }}
+{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
diff --git a/kibana/templates/service.yaml b/kibana/templates/service.yaml
index 24adfb7b4..60f47eb82 100644
--- a/kibana/templates/service.yaml
+++ b/kibana/templates/service.yaml
@@ -7,10 +7,17 @@ metadata:
app: {{ .Chart.Name }}
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service }}
+{{- with .Values.service.annotations }}
+ annotations:
+{{ toYaml . | indent 4 }}
+{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
+{{- if .Values.service.nodePort }}
+ nodePort: {{ .Values.service.nodePort }}
+{{- end }}
protocol: TCP
name: http
targetPort: {{ .Values.httpPort }}
diff --git a/kibana/tests/kibana_test.py b/kibana/tests/kibana_test.py
index 58f775b82..9b57f4c53 100644
--- a/kibana/tests/kibana_test.py
+++ b/kibana/tests/kibana_test.py
@@ -31,6 +31,9 @@ def test_defaults():
assert c['env'][0]['name'] == 'ELASTICSEARCH_HOSTS'
assert c['env'][0]['value'] == elasticsearchHosts
+ assert c['env'][1]['name'] == 'SERVER_HOST'
+ assert c['env'][1]['value'] == '0.0.0.0'
+
assert 'http "/app/kibana"' in c['readinessProbe']['exec']['command'][-1]
# Empty customizable defaults
@@ -41,6 +44,9 @@ def test_defaults():
assert r['deployment'][name]['spec']['strategy']['type'] == 'Recreate'
+ # Make sure that the default 'annotation' dictionary is empty
+ assert 'annotations' not in r['service'][name]['metadata']
+
def test_overriding_the_elasticsearch_hosts():
config = '''
elasticsearchHosts: 'http://hello.world'
@@ -153,6 +159,34 @@ def test_adding_an_ingress_rule():
assert i['rules'][0]['http']['paths'][0]['backend']['serviceName'] == name
assert i['rules'][0]['http']['paths'][0]['backend']['servicePort'] == 5601
+def test_adding_an_ingress_rule_wildcard():
+ config = '''
+ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ path: /
+ hosts:
+ - kibana.elastic.co
+ tls:
+ - secretName: elastic-co-wildcard
+ hosts:
+ - "*.elastic.co"
+'''
+
+ r = helm_template(config)
+ assert name in r['ingress']
+ i = r['ingress'][name]['spec']
+ assert i['tls'][0]['hosts'][0] == '*.elastic.co'
+ assert i['tls'][0]['secretName'] == 'elastic-co-wildcard'
+
+ assert i['rules'][0]['host'] == 'kibana.elastic.co'
+ assert i['rules'][0]['http']['paths'][0]['path'] == '/'
+ assert i['rules'][0]['http']['paths'][0]['backend']['serviceName'] == name
+ assert i['rules'][0]['http']['paths'][0]['backend']['servicePort'] == 5601
+
+
+
def test_override_the_default_update_strategy():
config = '''
updateStrategy:
@@ -252,3 +286,111 @@ def test_priority_class_name():
r = helm_template(config)
priority_class_name = r['deployment'][name]['spec']['template']['spec']['priorityClassName']
assert priority_class_name == "highest"
+
+
+def test_service_annotations():
+ config = '''
+service:
+ annotations:
+ cloud.google.com/load-balancer-type: "Internal"
+ '''
+ r = helm_template(config)
+ s = r['service'][name]['metadata']['annotations']['cloud.google.com/load-balancer-type']
+ assert s == "Internal"
+
+ config = '''
+service:
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+ '''
+ r = helm_template(config)
+ s = r['service'][name]['metadata']['annotations']['service.beta.kubernetes.io/aws-load-balancer-internal']
+ assert s == "0.0.0.0/0"
+
+def test_adding_a_nodePort():
+ config = ''
+
+ r = helm_template(config)
+
+ assert 'nodePort' not in r['service'][name]['spec']['ports'][0]
+
+ config = '''
+ service:
+ nodePort: 30001
+ '''
+
+ r = helm_template(config)
+
+ assert r['service'][name]['spec']['ports'][0]['nodePort'] == 30001
+
+def test_override_the_serverHost():
+ config = '''
+ serverHost: "localhost"
+ '''
+
+ r = helm_template(config)
+
+ c = r['deployment'][name]['spec']['template']['spec']['containers'][0]
+ assert c['env'][1]['name'] == 'SERVER_HOST'
+ assert c['env'][1]['value'] == 'localhost'
+
+def test_adding_pod_annotations():
+ config = '''
+podAnnotations:
+ iam.amazonaws.com/role: es-role
+'''
+ r = helm_template(config)
+ assert r['deployment'][name]['spec']['template']['metadata']['annotations']['iam.amazonaws.com/role'] == 'es-role'
+
+def test_override_imagePullPolicy():
+ config = ''
+
+ r = helm_template(config)
+ c = r['deployment'][name]['spec']['template']['spec']['containers'][0]
+ assert c['imagePullPolicy'] == 'IfNotPresent'
+
+ config = '''
+ imagePullPolicy: Always
+ '''
+
+ r = helm_template(config)
+ c = r['deployment'][name]['spec']['template']['spec']['containers'][0]
+ assert c['imagePullPolicy'] == 'Always'
+
+def test_adding_pod_labels():
+ config = '''
+labels:
+ app.kubernetes.io/name: kibana
+'''
+ r = helm_template(config)
+ assert r['deployment'][name]['metadata']['labels']['app.kubernetes.io/name'] == 'kibana'
+
+def test_adding_a_secret_mount_with_subpath():
+ config = '''
+secretMounts:
+ - name: elastic-certificates
+ secretName: elastic-certs
+ path: /usr/share/elasticsearch/config/certs
+ subPath: cert.crt
+'''
+ r = helm_template(config)
+ d = r['deployment'][name]['spec']['template']['spec']
+ assert d['containers'][0]['volumeMounts'][-1] == {
+ 'mountPath': '/usr/share/elasticsearch/config/certs',
+ 'subPath': 'cert.crt',
+ 'name': 'elastic-certificates'
+ }
+
+def test_adding_a_secret_mount_without_subpath():
+ config = '''
+secretMounts:
+ - name: elastic-certificates
+ secretName: elastic-certs
+ path: /usr/share/elasticsearch/config/certs
+'''
+ r = helm_template(config)
+ d = r['deployment'][name]['spec']['template']['spec']
+ assert d['containers'][0]['volumeMounts'][-1] == {
+ 'mountPath': '/usr/share/elasticsearch/config/certs',
+ 'name': 'elastic-certificates'
+ }
diff --git a/kibana/values.yaml b/kibana/values.yaml
index 95e576e36..cef949e62 100755
--- a/kibana/values.yaml
+++ b/kibana/values.yaml
@@ -16,14 +16,21 @@ extraEnvs: []
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
-# - name: elastic-certificates
-# secretName: elastic-certificates
-# path: /usr/share/elasticsearch/config/certs
+# - name: kibana-keystore
+# secretName: kibana-keystore
+# path: /usr/share/kibana/data/kibana.keystore
+# subPath: kibana.keystore # optional
image: "docker.elastic.co/kibana/kibana"
-imageTag: "7.1.0"
+imageTag: "7.3.0"
imagePullPolicy: "IfNotPresent"
+# additional labels
+labels: {}
+
+podAnnotations: {}
+ # iam.amazonaws.com/role: es-cluster
+
resources:
requests:
cpu: "100m"
@@ -34,6 +41,8 @@ resources:
protocol: http
+serverHost: "0.0.0.0"
+
healthCheckPath: "/app/kibana"
# Allows you to add any config files in /usr/share/kibana/config/
@@ -44,8 +53,17 @@ kibanaConfig: {}
# nestedkey: value
# If Pod Security Policy in use it may be required to specify security context as well as service account
-podSecurityContext: {}
- #runAsUser: "place the user id here"
+
+podSecurityContext:
+ fsGroup: 1000
+
+securityContext:
+ capabilities:
+ drop:
+ - ALL
+ # readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
serviceAccount: ""
@@ -74,6 +92,13 @@ updateStrategy:
service:
type: ClusterIP
port: 5601
+ nodePort:
+ annotations: {}
+ # cloud.google.com/load-balancer-type: "Internal"
+ # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+ # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+ # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
+ # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
ingress:
enabled: false
diff --git a/metricbeat/.helmignore b/metricbeat/.helmignore
new file mode 100644
index 000000000..e12c0b4b9
--- /dev/null
+++ b/metricbeat/.helmignore
@@ -0,0 +1,2 @@
+tests/
+.pytest_cache/
diff --git a/metricbeat/Chart.yaml b/metricbeat/Chart.yaml
new file mode 100755
index 000000000..827245d11
--- /dev/null
+++ b/metricbeat/Chart.yaml
@@ -0,0 +1,11 @@
+description: Official Elastic helm chart for Metricbeat
+home: https://github.com/elastic/helm-charts
+maintainers:
+- email: helm-charts@elastic.co
+ name: Elastic
+name: metricbeat
+version: 7.3.0
+appVersion: 7.3.0
+sources:
+ - https://github.com/elastic/beats
+icon: https://helm.elastic.co/icons/metricbeat.png
diff --git a/metricbeat/Makefile b/metricbeat/Makefile
new file mode 100644
index 000000000..22218a1f6
--- /dev/null
+++ b/metricbeat/Makefile
@@ -0,0 +1 @@
+include ../helpers/common.mk
diff --git a/metricbeat/README.md b/metricbeat/README.md
new file mode 100644
index 000000000..3247c02d9
--- /dev/null
+++ b/metricbeat/README.md
@@ -0,0 +1,114 @@
+# Metricbeat Helm Chart
+
+This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
+
+This helm chart is a lightweight way to configure and run our official [Metricbeat docker image](https://www.elastic.co/guide/en/beats/metricbeat/current/running-on-docker.html).
+
+## Requirements
+
+* Kubernetes >= 1.8
+* [Helm](https://helm.sh/) >= 2.8.0
+
+## Installing
+
+* Add the elastic helm charts repo
+ ```
+ helm repo add elastic https://helm.elastic.co
+ ```
+* Install it
+ ```
+ helm install --name metricbeat elastic/metricbeat
+ ```
+
+## Compatibility
+
+This chart is tested with the latest supported versions. The currently tested versions are:
+
+| 6.x | 7.x |
+| ----- | ----- |
+| 6.8.1 | 7.3.0 |
+
+Examples of installing older major versions can be found in the [examples](./examples) directory.
+
+While only the latest releases are tested, it is possible to easily install old or new releases by overriding the `imageTag`. To install version `7.3.0` of metricbeat it would look like this:
+
+```
+helm install --name metricbeat elastic/metricbeat --set imageTag=7.3.0
+```
+
+
+## Configuration
+| Parameter | Description | Default |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
+| `metricbeatConfig` | Allows you to add any config files in `/usr/share/metricbeat` such as `metricbeat.yml`. See [values.yaml](./values.yaml) for an example of the formatting with the default configuration. | see [values.yaml](./values.yaml) |
+| `extraEnvs` | Extra [environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config) which will be appended to the `env:` definition for the container | `[]` |
+| `extraVolumeMounts` | Templatable string of additional volumeMounts to be passed to the `tpl` function | `""` |
+| `extraVolumes` | Templatable string of additional volumes to be passed to the `tpl` function | `""` |
+| `hostPathRoot` | Fully-qualified [hostPath](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) that will be used to persist Metricbeat registry data | `/var/lib` |
+| `image` | The Metricbeat docker image | `docker.elastic.co/beats/metricbeat` |
+| `imageTag` | The Metricbeat docker image tag | `7.3.0` |
+| `imagePullPolicy` | The Kubernetes [imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) value | `IfNotPresent` |
+| `imagePullSecrets` | Configuration for [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) so that you can use a private registry for your image | `[]` |
+| `managedServiceAccount` | Whether the `serviceAccount` should be managed by this helm chart. Set this to `false` in order to manage your own service account and related roles. | `true` |
+| `podAnnotations` | Configurable [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) applied to all Metricbeat pods | `{}` |
+| `podSecurityContext` | Configurable [podSecurityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Metricbeat pod execution environment | `runAsUser: 0`
`privileged: false` |
+| `livenessProbe` | Parameters to pass to [liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) checks for values such as timeouts and thresholds. | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
+| `readinessProbe` | Parameters to pass to [readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) checks for values such as timeouts and thresholds. | `failureThreshold: 3`
`initialDelaySeconds: 10`
`periodSeconds: 10`
`successThreshold: 3`
`timeoutSeconds: 5` |
+| `resources` | Allows you to set the [resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for the `DaemonSet` | `requests.cpu: 100m`
`requests.memory: 100Mi`
`limits.cpu: 1000m`
`limits.memory: 200Mi` |
+| `serviceAccount` | Custom [serviceAccount](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) that Metricbeat will use during execution. By default will use the service account created by this chart. | `""` |
+| `secretMounts`           | Allows you to easily mount a secret as a file inside the `DaemonSet`. Useful for mounting certificates and other secrets. See [values.yaml](./values.yaml) for an example | `[]` |
+| `terminationGracePeriod` | Termination period (in seconds) to wait before killing Metricbeat pod process on pod shutdown | `30` |
+| `tolerations` | Configurable [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) | `[]` |
+| `nodeSelector` | Configurable [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) | `{}` |
+| `affinity` | Configurable [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) | `{}` |
+| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy) for the `DaemonSet`. By default Kubernetes will kill and recreate pods on updates. Setting this to `OnDelete` will require that pods be deleted manually. | `RollingUpdate` |
+| `replicas` | The replica count for the metricbeat deployment talking to kube-state-metrics | `1` |
+
+## Examples
+
+In [examples/](./examples) you will find some example configurations. These examples are used for the automated testing of this helm chart.
+
+### Default
+
+* Deploy the [default Elasticsearch helm chart](../elasticsearch/README.md#default)
+* Deploy Metricbeat with the default values
+ ```
+ cd examples/default
+ make
+ ```
+* You can now setup a port forward for Elasticsearch to observe Metricbeat indices
+ ```
+ kubectl port-forward svc/elasticsearch-master 9200
+ curl localhost:9200/_cat/indices
+ ```
+
+## Testing
+
+This chart uses [pytest](https://docs.pytest.org/en/latest/) to test the templating logic. The dependencies for testing can be installed from the [`requirements.txt`](../requirements.txt) in the parent directory.
+
+```
+pip install -r ../requirements.txt
+make pytest
+```
+
+You can also use `helm template` to look at the YAML being generated
+
+```
+make template
+```
+
+It is possible to run all of the tests and linting inside of a docker container
+
+```
+make test
+```
+
+## Integration Testing
+
+Integration tests are run using [goss](https://github.com/aelsabbahy/goss/blob/master/docs/manual.md) which is a serverspec-like tool written in golang. See [goss.yaml](examples/default/test/goss.yaml) for an example of what the tests look like.
+
+To run the goss tests against the default example:
+```
+cd examples/default
+make goss
+```
diff --git a/metricbeat/examples/6.x/Makefile b/metricbeat/examples/6.x/Makefile
new file mode 100644
index 000000000..05a66d2bc
--- /dev/null
+++ b/metricbeat/examples/6.x/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-metricbeat-six
+GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-six-metricbeat
+
+install:
+ helm upgrade --wait --timeout=600 --install $(RELEASE) --values values.yaml ../../
+
+purge:
+ helm del --purge $(RELEASE)
+
+test-metrics:
+ GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-six-metricbeat-metrics
+
+test: install goss test-metrics
diff --git a/metricbeat/examples/6.x/test/goss-metrics.yaml b/metricbeat/examples/6.x/test/goss-metrics.yaml
new file mode 100644
index 000000000..fd776f6ad
--- /dev/null
+++ b/metricbeat/examples/6.x/test/goss-metrics.yaml
@@ -0,0 +1,43 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/kube-state-metrics-metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://six-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-6.8.1'
+ http://six-master:9200/_search?q=metricset.name:state_deployment:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-6.8.1'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://six-master:9200'
+ - 'version: 6.8.1'
diff --git a/metricbeat/examples/6.x/test/goss.yaml b/metricbeat/examples/6.x/test/goss.yaml
new file mode 100644
index 000000000..ba21aab47
--- /dev/null
+++ b/metricbeat/examples/6.x/test/goss.yaml
@@ -0,0 +1,51 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/data:
+ exists: true
+ /run/docker.sock:
+ exists: true
+ /var/lib/docker/containers:
+ exists: true
+ opts:
+ - ro
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://six-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-6.8.1'
+ http://six-master:9200/_search?q=metricset.name:container:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-6.8.1'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://six-master:9200'
+ - 'version: 6.8.1'
diff --git a/metricbeat/examples/6.x/values.yaml b/metricbeat/examples/6.x/values.yaml
new file mode 100644
index 000000000..d0eeea620
--- /dev/null
+++ b/metricbeat/examples/6.x/values.yaml
@@ -0,0 +1,5 @@
+imageTag: 6.8.1
+
+extraEnvs:
+ - name: ELASTICSEARCH_HOSTS
+ value: six-master:9200
diff --git a/metricbeat/examples/default/Makefile b/metricbeat/examples/default/Makefile
new file mode 100644
index 000000000..833f62973
--- /dev/null
+++ b/metricbeat/examples/default/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE = helm-metricbeat-default
+GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-default-metricbeat
+
+install:
+ helm upgrade --wait --timeout=600 --install $(RELEASE) ../../
+
+test-metrics:
+ GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-default-metricbeat-metrics
+
+test: install goss test-metrics
+
+purge:
+ helm del --purge $(RELEASE)
diff --git a/metricbeat/examples/default/test/goss-metrics.yaml b/metricbeat/examples/default/test/goss-metrics.yaml
new file mode 100644
index 000000000..0e30826b1
--- /dev/null
+++ b/metricbeat/examples/default/test/goss-metrics.yaml
@@ -0,0 +1,45 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/kube-state-metrics-metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://elasticsearch-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+
+ 'http://elasticsearch-master:9200/_search?q=metricset.name:state_container%20AND%20kubernetes.container.name:metricbeat':
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+ - 'elasticsearch-master:9200'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://elasticsearch-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/default/test/goss.yaml b/metricbeat/examples/default/test/goss.yaml
new file mode 100644
index 000000000..2306ee7d9
--- /dev/null
+++ b/metricbeat/examples/default/test/goss.yaml
@@ -0,0 +1,52 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/data:
+ exists: true
+ /run/docker.sock:
+ exists: true
+ /var/lib/docker/containers:
+ exists: true
+ opts:
+ - ro
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://elasticsearch-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ 'http://elasticsearch-master:9200/_search?q=metricset.name:container%20AND%20kubernetes.container.name:metricbeat':
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+ - 'elasticsearch-master:9200'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://elasticsearch-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/oss/Makefile b/metricbeat/examples/oss/Makefile
new file mode 100644
index 000000000..0e4828ed0
--- /dev/null
+++ b/metricbeat/examples/oss/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-metricbeat-oss
+GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-oss-metricbeat
+
+install:
+ helm upgrade --wait --timeout=600 --install $(RELEASE) --values values.yaml ../../
+
+purge:
+ helm del --purge $(RELEASE)
+
+test-metrics:
+ GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-oss-metricbeat-metrics
+
+test: install goss test-metrics
diff --git a/metricbeat/examples/oss/test/goss-metrics.yaml b/metricbeat/examples/oss/test/goss-metrics.yaml
new file mode 100644
index 000000000..d8d930db9
--- /dev/null
+++ b/metricbeat/examples/oss/test/goss-metrics.yaml
@@ -0,0 +1,43 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/kube-state-metrics-metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://oss-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ http://oss-master:9200/_search?q=metricset.name:state_deployment:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://oss-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/oss/test/goss.yaml b/metricbeat/examples/oss/test/goss.yaml
new file mode 100644
index 000000000..37ff2a76a
--- /dev/null
+++ b/metricbeat/examples/oss/test/goss.yaml
@@ -0,0 +1,51 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/data:
+ exists: true
+ /run/docker.sock:
+ exists: true
+ /var/lib/docker/containers:
+ exists: true
+ opts:
+ - ro
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ http://oss-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ http://oss-master:9200/_search?q=metricset.name:container:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: http://oss-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/oss/values.yaml b/metricbeat/examples/oss/values.yaml
new file mode 100644
index 000000000..89f2d453c
--- /dev/null
+++ b/metricbeat/examples/oss/values.yaml
@@ -0,0 +1,5 @@
+image: docker.elastic.co/beats/metricbeat-oss
+
+extraEnvs:
+ - name: ELASTICSEARCH_HOSTS
+ value: oss-master:9200
diff --git a/metricbeat/examples/security/Makefile b/metricbeat/examples/security/Makefile
new file mode 100644
index 000000000..3f92e7fe2
--- /dev/null
+++ b/metricbeat/examples/security/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-metricbeat-security
+GOSS_SELECTOR = release=$(RELEASE),app=helm-metricbeat-security-metricbeat
+
+install:
+ helm upgrade --wait --timeout=600 --install $(RELEASE) --values values.yaml ../../
+
+purge:
+ helm del --purge $(RELEASE)
+
+test-metrics:
+ GOSS_FILE=goss-metrics.yaml make goss GOSS_SELECTOR=release=$(RELEASE),app=helm-metricbeat-security-metricbeat-metrics
+
+test: install goss test-metrics
diff --git a/metricbeat/examples/security/test/goss-metrics.yaml b/metricbeat/examples/security/test/goss-metrics.yaml
new file mode 100644
index 000000000..78b91122d
--- /dev/null
+++ b/metricbeat/examples/security/test/goss-metrics.yaml
@@ -0,0 +1,49 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/kube-state-metrics-metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ https://security-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ allow-insecure: true
+ username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
+ password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+ https://security-master:9200/_search?q=metricset.name:state_deployment:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ allow-insecure: true
+ username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
+ password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: https://security-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/security/test/goss.yaml b/metricbeat/examples/security/test/goss.yaml
new file mode 100644
index 000000000..eeb6eccff
--- /dev/null
+++ b/metricbeat/examples/security/test/goss.yaml
@@ -0,0 +1,57 @@
+port:
+ tcp:5066:
+ listening: true
+ ip:
+ - '127.0.0.1'
+
+mount:
+ /usr/share/metricbeat/data:
+ exists: true
+ /run/docker.sock:
+ exists: true
+ /var/lib/docker/containers:
+ exists: true
+ opts:
+ - ro
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ opts:
+ - ro
+
+user:
+ metricbeat:
+ exists: true
+ uid: 1000
+ gid: 1000
+
+http:
+ https://security-master:9200/_cat/indices:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ allow-insecure: true
+ username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
+ password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+ https://security-master:9200/_search?q=metricset.name:container:
+ status: 200
+ timeout: 2000
+ body:
+ - 'metricbeat-7.3.0'
+ allow-insecure: true
+ username: '{{ .Env.ELASTICSEARCH_USERNAME }}'
+ password: '{{ .Env.ELASTICSEARCH_PASSWORD }}'
+
+file:
+ /usr/share/metricbeat/metricbeat.yml:
+ exists: true
+ contains:
+ - 'add_kubernetes_metadata'
+ - 'output.elasticsearch'
+
+command:
+ cd /usr/share/metricbeat && metricbeat test output:
+ exit-status: 0
+ stdout:
+ - 'elasticsearch: https://security-master:9200'
+ - 'version: 7.3.0'
diff --git a/metricbeat/examples/security/values.yaml b/metricbeat/examples/security/values.yaml
new file mode 100644
index 000000000..dfe90a10a
--- /dev/null
+++ b/metricbeat/examples/security/values.yaml
@@ -0,0 +1,88 @@
+metricbeatConfig:
+ metricbeat.yml: |
+ system:
+ hostfs: /hostfs
+ metricbeat.modules:
+ - module: kubernetes
+ metricsets:
+ - container
+ - node
+ - pod
+ - system
+ - volume
+ period: 10s
+ host: "${NODE_NAME}"
+ hosts: ["${NODE_NAME}:10255"]
+ processors:
+ - add_kubernetes_metadata:
+ in_cluster: true
+ - module: kubernetes
+ enabled: true
+ metricsets:
+ - event
+ - module: system
+ period: 10s
+ metricsets:
+ - cpu
+ - load
+ - memory
+ - network
+ - process
+ - process_summary
+ processes: ['.*']
+ process.include_top_n:
+ by_cpu: 5
+ by_memory: 5
+ - module: system
+ period: 1m
+ metricsets:
+ - filesystem
+ - fsstat
+ processors:
+ - drop_event.when.regexp:
+ system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
+
+ output.elasticsearch:
+ username: '${ELASTICSEARCH_USERNAME}'
+ password: '${ELASTICSEARCH_PASSWORD}'
+ protocol: https
+ hosts: ["security-master:9200"]
+ ssl.certificate_authorities:
+ - /usr/share/metricbeat/config/certs/elastic-certificate.pem
+
+ kube-state-metrics-metricbeat.yml: |
+ metricbeat.modules:
+ - module: kubernetes
+ enabled: true
+ metricsets:
+ - state_node
+ - state_deployment
+ - state_replicaset
+ - state_pod
+ - state_container
+ period: 10s
+ hosts: ["${KUBE_STATE_METRICS_HOSTS:kube-state-metrics:8080}"]
+ output.elasticsearch:
+ username: '${ELASTICSEARCH_USERNAME}'
+ password: '${ELASTICSEARCH_PASSWORD}'
+ protocol: https
+ hosts: ["security-master:9200"]
+ ssl.certificate_authorities:
+ - /usr/share/metricbeat/config/certs/elastic-certificate.pem
+
+secretMounts:
+ - name: elastic-certificate-pem
+ secretName: elastic-certificate-pem
+ path: /usr/share/metricbeat/config/certs
+
+extraEnvs:
+ - name: 'ELASTICSEARCH_USERNAME'
+ valueFrom:
+ secretKeyRef:
+ name: elastic-credentials
+ key: username
+ - name: 'ELASTICSEARCH_PASSWORD'
+ valueFrom:
+ secretKeyRef:
+ name: elastic-credentials
+ key: password
diff --git a/metricbeat/requirements.lock b/metricbeat/requirements.lock
new file mode 100644
index 000000000..213c4e3a4
--- /dev/null
+++ b/metricbeat/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: kube-state-metrics
+ repository: https://kubernetes-charts.storage.googleapis.com
+ version: 1.6.0
+digest: sha256:111c5be854f72db1996a198a473a3e69bd50b7c5f046cf03ee4733d62a612874
+generated: 2019-06-11T09:46:07.710748+02:00
diff --git a/metricbeat/requirements.yaml b/metricbeat/requirements.yaml
new file mode 100644
index 000000000..37d378f9c
--- /dev/null
+++ b/metricbeat/requirements.yaml
@@ -0,0 +1,4 @@
+dependencies:
+ - name: 'kube-state-metrics'
+ version: '1.6.0'
+ repository: '@stable'
diff --git a/metricbeat/templates/NOTES.txt b/metricbeat/templates/NOTES.txt
new file mode 100755
index 000000000..24cdde2e5
--- /dev/null
+++ b/metricbeat/templates/NOTES.txt
@@ -0,0 +1,2 @@
+1. Watch all containers come up.
+ $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "fullname" . }} -w
diff --git a/metricbeat/templates/_helpers.tpl b/metricbeat/templates/_helpers.tpl
new file mode 100755
index 000000000..769546335
--- /dev/null
+++ b/metricbeat/templates/_helpers.tpl
@@ -0,0 +1,28 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Use the fullname if the serviceAccount value is not set
+*/}}
+{{- define "serviceAccount" -}}
+{{- if .Values.serviceAccount }}
+{{- .Values.serviceAccount -}}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
diff --git a/metricbeat/templates/clusterrole.yaml b/metricbeat/templates/clusterrole.yaml
new file mode 100644
index 000000000..52d7e5e98
--- /dev/null
+++ b/metricbeat/templates/clusterrole.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.managedServiceAccount }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ template "serviceAccount" . }}-cluster-role
+ labels:
+ app: "{{ template "fullname" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+{{- end -}}
diff --git a/metricbeat/templates/clusterrolebinding.yaml b/metricbeat/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..b901e2387
--- /dev/null
+++ b/metricbeat/templates/clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.managedServiceAccount }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ template "serviceAccount" . }}-cluster-role-binding
+ labels:
+ app: "{{ template "fullname" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+roleRef:
+ kind: ClusterRole
+ name: {{ template "serviceAccount" . }}-cluster-role
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+ name: {{ template "serviceAccount" . }}
+ namespace: {{ .Release.Namespace }}
+{{- end -}}
diff --git a/metricbeat/templates/configmap.yaml b/metricbeat/templates/configmap.yaml
new file mode 100644
index 000000000..655173b52
--- /dev/null
+++ b/metricbeat/templates/configmap.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.metricbeatConfig }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "fullname" . }}-config
+ labels:
+ app: "{{ template "fullname" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+data:
+{{- range $path, $config := .Values.metricbeatConfig }}
+ {{ $path }}: |
+{{ $config | indent 4 -}}
+{{- end -}}
+{{- end -}}
diff --git a/metricbeat/templates/daemonset.yaml b/metricbeat/templates/daemonset.yaml
new file mode 100644
index 000000000..a86850e50
--- /dev/null
+++ b/metricbeat/templates/daemonset.yaml
@@ -0,0 +1,145 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ template "fullname" . }}
+ labels:
+ app: "{{ template "fullname" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+spec:
+ selector:
+ matchLabels:
+ app: "{{ template "fullname" . }}"
+ release: {{ .Release.Name | quote }}
+ updateStrategy:
+ type: {{ .Values.updateStrategy }}
+ template:
+ metadata:
+ annotations:
+ {{- range $key, $value := .Values.podAnnotations }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+ {{/* This forces a restart if the configmap has changed */}}
+ {{- if .Values.metricbeatConfig }}
+ configChecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
+ {{- end }}
+ name: "{{ template "fullname" . }}"
+ labels:
+ app: "{{ template "fullname" . }}"
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+ spec:
+ {{- with .Values.tolerations }}
+ tolerations: {{ toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector: {{ toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity: {{ toYaml . | nindent 8 -}}
+ {{- end }}
+ serviceAccountName: {{ template "serviceAccount" . }}
+ terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
+ volumes:
+ {{- range .Values.secretMounts }}
+ - name: {{ .name }}
+ secret:
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- if .Values.metricbeatConfig }}
+ - name: metricbeat-config
+ configMap:
+ defaultMode: 0600
+ name: {{ template "fullname" . }}-config
+ {{- end }}
+ - name: data
+ hostPath:
+ path: {{ .Values.hostPathRoot }}/{{ template "fullname" . }}-{{ .Release.Namespace }}-data
+ type: DirectoryOrCreate
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: varrundockersock
+ hostPath:
+ path: /var/run/docker.sock
+ {{- if .Values.extraVolumes }}
+{{ tpl .Values.extraVolumes . | indent 6 }}
+ {{- end }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: "metricbeat"
+ image: "{{ .Values.image }}:{{ .Values.imageTag }}"
+ imagePullPolicy: "{{ .Values.imagePullPolicy }}"
+ args:
+ - "-e"
+ - "-E"
+ - "http.enabled=true"
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - |
+ #!/usr/bin/env bash -e
+ curl --fail 127.0.0.1:5066
+{{ toYaml .Values.livenessProbe | indent 10 }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - |
+ #!/usr/bin/env bash -e
+ metricbeat test output
+{{ toYaml .Values.readinessProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+{{- if .Values.extraEnvs }}
+{{ toYaml .Values.extraEnvs | indent 8 }}
+{{- end }}
+{{- if .Values.podSecurityContext }}
+ securityContext:
+{{ toYaml .Values.podSecurityContext | indent 10 }}
+{{- end }}
+ volumeMounts:
+ {{- range .Values.secretMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .path }}
+ {{- if .subPath }}
+ subPath: {{ .subPath }}
+ {{- end }}
+ {{- end }}
+ {{- range $path, $config := .Values.metricbeatConfig }}
+ - name: metricbeat-config
+ mountPath: /usr/share/metricbeat/{{ $path }}
+ readOnly: true
+ subPath: {{ $path }}
+ {{- end }}
+ - name: data
+ mountPath: /usr/share/metricbeat/data
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ # Necessary when using autodiscovery; avoid mounting it otherwise
+ # See: https://www.elastic.co/guide/en/beats/metricbeat/master/configuration-autodiscover.html
+ - name: varrundockersock
+ mountPath: /var/run/docker.sock
+ readOnly: true
+ {{- if .Values.extraVolumeMounts }}
+{{ tpl .Values.extraVolumeMounts . | indent 8 }}
+ {{- end }}
diff --git a/metricbeat/templates/deployment.yaml b/metricbeat/templates/deployment.yaml
new file mode 100644
index 000000000..96cce2ea6
--- /dev/null
+++ b/metricbeat/templates/deployment.yaml
@@ -0,0 +1,119 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: '{{ template "fullname" . }}-metrics'
+  labels:
+    app: '{{ template "fullname" . }}-metrics'
+    chart: '{{ .Chart.Name }}-{{ .Chart.Version }}'
+    heritage: '{{ .Release.Service }}'
+    release: '{{ .Release.Name }}'
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      # apps/v1 selectors are immutable: only version-stable keys here (chart embeds the version)
+      app: '{{ template "fullname" . }}-metrics'
+      release: '{{ .Release.Name }}'
+  template:
+    metadata:
+      annotations:
+        {{- range $key, $value := .Values.podAnnotations }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+        {{/* This forces a restart if the configmap has changed */}}
+        {{- if .Values.metricbeatConfig }}
+        configChecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
+        {{- end }}
+      labels:
+        app: '{{ template "fullname" . }}-metrics'
+        chart: '{{ .Chart.Name }}-{{ .Chart.Version }}'
+        heritage: '{{ .Release.Service }}'
+        release: '{{ .Release.Name }}'
+    spec:
+      {{- with .Values.tolerations }}
+      tolerations:
+{{ toYaml . | indent 6 }}
+      {{- end }}
+      serviceAccountName: {{ template "serviceAccount" . }}
+      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
+      volumes:
+      {{- range .Values.secretMounts }}
+      - name: {{ .name }}
+        secret:
+          secretName: {{ .secretName }}
+      {{- end }}
+      {{- if .Values.metricbeatConfig }}
+      - name: metricbeat-config
+        configMap:
+          defaultMode: 0600
+          name: {{ template "fullname" . }}-config
+      {{- end }}
+      {{- if .Values.extraVolumes }}
+{{ tpl .Values.extraVolumes . | indent 6 }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+      {{- end }}
+      containers:
+      - name: "metricbeat"
+        image: "{{ .Values.image }}:{{ .Values.imageTag }}"
+        imagePullPolicy: "{{ .Values.imagePullPolicy }}"
+        args:
+          - "-c"
+          - "/usr/share/metricbeat/kube-state-metrics-metricbeat.yml"
+          - "-e"
+          - "-E"
+          - "http.enabled=true"
+        livenessProbe:
+          exec:
+            command:
+              - sh
+              - -c
+              - |
+                #!/usr/bin/env bash -e
+                curl --fail 127.0.0.1:5066
+{{ toYaml .Values.livenessProbe | indent 10 }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - |
+ #!/usr/bin/env bash -e
+ metricbeat test output
+{{ toYaml .Values.readinessProbe | indent 10 }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: KUBE_STATE_METRICS_HOSTS
+ value: "$({{ .Release.Name | replace "-" "_" | upper }}_KUBE_STATE_METRICS_SERVICE_HOST):$({{ .Release.Name | replace "-" "_" | upper }}_KUBE_STATE_METRICS_SERVICE_PORT_HTTP)"
+{{- if .Values.extraEnvs }}
+{{ toYaml .Values.extraEnvs | indent 8 }}
+{{- end }}
+{{- if .Values.podSecurityContext }}
+ securityContext:
+{{ toYaml .Values.podSecurityContext | indent 10 }}
+{{- end }}
+ volumeMounts:
+ {{- range .Values.secretMounts }}
+ - name: {{ .name }}
+ mountPath: {{ .path }}
+ {{- if .subPath }}
+ subPath: {{ .subPath }}
+ {{- end }}
+ {{- end }}
+ {{- range $path, $config := .Values.metricbeatConfig }}
+ - name: metricbeat-config
+ mountPath: /usr/share/metricbeat/{{ $path }}
+ readOnly: true
+ subPath: {{ $path }}
+ {{- end }}
+ {{- if .Values.extraVolumeMounts }}
+{{ tpl .Values.extraVolumeMounts . | indent 8 }}
+ {{- end }}
diff --git a/metricbeat/templates/serviceaccount.yaml b/metricbeat/templates/serviceaccount.yaml
new file mode 100644
index 000000000..70a7a049b
--- /dev/null
+++ b/metricbeat/templates/serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.managedServiceAccount }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "serviceAccount" . }}
+  labels:
+    app: "{{ template "fullname" . }}"
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    heritage: "{{ .Release.Service }}"
+    release: "{{ .Release.Name }}"
+{{- end -}}
diff --git a/metricbeat/tests/metricbeat_test.py b/metricbeat/tests/metricbeat_test.py
new file mode 100644
index 000000000..a274b6a90
--- /dev/null
+++ b/metricbeat/tests/metricbeat_test.py
@@ -0,0 +1,209 @@
+import os
+import sys
+sys.path.insert(1, os.path.join(sys.path[0], '../../helpers'))
+from helpers import helm_template
+import yaml
+
+project = 'metricbeat'
+name = 'release-name-' + project
+
+def test_defaults():
+ config = '''
+ '''
+
+ r = helm_template(config)
+
+ assert name in r['daemonset']
+
+ c = r['daemonset'][name]['spec']['template']['spec']['containers'][0]
+ assert c['name'] == project
+ assert c['image'].startswith('docker.elastic.co/beats/' + project + ':')
+
+ assert c['env'][0]['name'] == 'POD_NAMESPACE'
+ assert c['env'][0]['valueFrom']['fieldRef']['fieldPath'] == 'metadata.namespace'
+
+ assert 'curl --fail 127.0.0.1:5066' in c['livenessProbe']['exec']['command'][-1]
+
+ assert 'metricbeat test output' in c['readinessProbe']['exec']['command'][-1]
+
+ # Empty customizable defaults
+ assert 'imagePullSecrets' not in r['daemonset'][name]['spec']['template']['spec']
+ assert 'tolerations' not in r['daemonset'][name]['spec']['template']['spec']
+
+ assert r['daemonset'][name]['spec']['updateStrategy']['type'] == 'RollingUpdate'
+
+ assert r['daemonset'][name]['spec']['template']['spec']['serviceAccountName'] == name
+
+ volumes = r['daemonset'][name]['spec']['template']['spec']['volumes']
+ assert {
+ 'name': 'data',
+ 'hostPath': {
+ 'path': '/var/lib/' + name + '-default-data',
+ 'type': 'DirectoryOrCreate'
+ }
+ } in volumes
+
+
+def test_adding_envs():
+ config = '''
+extraEnvs:
+- name: LOG_LEVEL
+ value: DEBUG
+'''
+ r = helm_template(config)
+ envs = r['daemonset'][name]['spec']['template']['spec']['containers'][0]['env']
+ assert {'name': 'LOG_LEVEL', 'value': 'DEBUG'} in envs
+
+
+def test_adding_image_pull_secrets():
+ config = '''
+imagePullSecrets:
+ - name: test-registry
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['imagePullSecrets'][0]['name'] == 'test-registry'
+
+
+def test_adding_tolerations():
+ config = '''
+tolerations:
+- key: "key1"
+ operator: "Equal"
+ value: "value1"
+ effect: "NoExecute"
+ tolerationSeconds: 3600
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['tolerations'][0]['key'] == 'key1'
+
+
+def test_override_the_default_update_strategy():
+ config = '''
+updateStrategy: OnDelete
+'''
+
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['updateStrategy']['type'] == 'OnDelete'
+
+def test_setting_a_custom_service_account():
+ config = '''
+serviceAccount: notdefault
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['serviceAccountName'] == 'notdefault'
+
+def test_self_managing_rbac_resources():
+ config = '''
+managedServiceAccount: false
+'''
+ r = helm_template(config)
+ assert name not in r['serviceaccount']
+ assert name not in r['clusterrole']
+ assert name not in r['clusterrolebinding']
+
+def test_setting_pod_security_context():
+    config = '''
+podSecurityContext:
+  runAsUser: 1001
+  privileged: false
+'''
+    r = helm_template(config)
+    c = r['daemonset'][name]['spec']['template']['spec']['containers'][0]
+    assert c['securityContext']['runAsUser'] == 1001
+    assert c['securityContext']['privileged'] is False
+
+def test_adding_in_metricbeat_config():
+ config = '''
+metricbeatConfig:
+ metricbeat.yml: |
+ key:
+ nestedkey: value
+ dot.notation: test
+
+ other-config.yml: |
+ hello = world
+'''
+ r = helm_template(config)
+ c = r['configmap'][name + '-config']['data']
+
+ assert 'metricbeat.yml' in c
+ assert 'other-config.yml' in c
+
+ assert 'nestedkey: value' in c['metricbeat.yml']
+ assert 'dot.notation: test' in c['metricbeat.yml']
+
+ assert 'hello = world' in c['other-config.yml']
+
+ d = r['daemonset'][name]['spec']['template']['spec']
+
+ assert {'configMap': {'name': name + '-config', 'defaultMode': 0o600}, 'name': project + '-config'} in d['volumes']
+ assert {'mountPath': '/usr/share/metricbeat/metricbeat.yml', 'name': project + '-config', 'subPath': 'metricbeat.yml', 'readOnly': True} in d['containers'][0]['volumeMounts']
+ assert {'mountPath': '/usr/share/metricbeat/other-config.yml', 'name': project + '-config', 'subPath': 'other-config.yml', 'readOnly': True} in d['containers'][0]['volumeMounts']
+
+ assert 'configChecksum' in r['daemonset'][name]['spec']['template']['metadata']['annotations']
+
+
+def test_adding_a_secret_mount():
+ config = '''
+secretMounts:
+ - name: elastic-certificates
+ secretName: elastic-certificates-name
+ path: /usr/share/metricbeat/config/certs
+'''
+ r = helm_template(config)
+ s = r['daemonset'][name]['spec']['template']['spec']
+ assert s['containers'][0]['volumeMounts'][0] == {
+ 'mountPath': '/usr/share/metricbeat/config/certs',
+ 'name': 'elastic-certificates'
+ }
+ assert s['volumes'][0] == {
+ 'name': 'elastic-certificates',
+ 'secret': {
+ 'secretName': 'elastic-certificates-name'
+ }
+ }
+
+
+def test_adding_a_extra_volume_with_volume_mount():
+ config = '''
+extraVolumes: |
+ - name: extras
+ emptyDir: {}
+extraVolumeMounts: |
+ - name: extras
+ mountPath: /usr/share/extras
+ readOnly: true
+'''
+ r = helm_template(config)
+ extraVolume = r['daemonset'][name]['spec']['template']['spec']['volumes']
+ assert {'name': 'extras', 'emptyDir': {}} in extraVolume
+ extraVolumeMounts = r['daemonset'][name]['spec']['template']['spec']['containers'][0]['volumeMounts']
+ assert {'name': 'extras', 'mountPath': '/usr/share/extras', 'readOnly': True} in extraVolumeMounts
+
+
+def test_adding_a_node_selector():
+ config = '''
+nodeSelector:
+ disktype: ssd
+'''
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['nodeSelector']['disktype'] == 'ssd'
+
+
+def test_adding_an_affinity_rule():
+ config = '''
+affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - metricbeat
+ topologyKey: kubernetes.io/hostname
+'''
+
+ r = helm_template(config)
+ assert r['daemonset'][name]['spec']['template']['spec']['affinity']['podAntiAffinity'][
+ 'requiredDuringSchedulingIgnoredDuringExecution'][0]['topologyKey'] == 'kubernetes.io/hostname'
diff --git a/metricbeat/values.yaml b/metricbeat/values.yaml
new file mode 100755
index 000000000..c088a8793
--- /dev/null
+++ b/metricbeat/values.yaml
@@ -0,0 +1,152 @@
+---
+# Allows you to add any config files in /usr/share/metricbeat
+# such as metricbeat.yml
+metricbeatConfig:
+ metricbeat.yml: |
+ system:
+ hostfs: /hostfs
+ metricbeat.modules:
+ - module: kubernetes
+ metricsets:
+ - container
+ - node
+ - pod
+ - system
+ - volume
+ period: 10s
+ host: "${NODE_NAME}"
+ hosts: ["${NODE_NAME}:10255"]
+ processors:
+ - add_kubernetes_metadata:
+ in_cluster: true
+ - module: kubernetes
+ enabled: true
+ metricsets:
+ - event
+ - module: system
+ period: 10s
+ metricsets:
+ - cpu
+ - load
+ - memory
+ - network
+ - process
+ - process_summary
+ processes: ['.*']
+ process.include_top_n:
+ by_cpu: 5
+ by_memory: 5
+ - module: system
+ period: 1m
+ metricsets:
+ - filesystem
+ - fsstat
+ processors:
+ - drop_event.when.regexp:
+ system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
+ output.elasticsearch:
+ hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
+
+ kube-state-metrics-metricbeat.yml: |
+ metricbeat.modules:
+ - module: kubernetes
+ enabled: true
+ metricsets:
+ - state_node
+ - state_deployment
+ - state_replicaset
+ - state_pod
+ - state_container
+ period: 10s
+ hosts: ["${KUBE_STATE_METRICS_HOSTS:kube-state-metrics:8080}"]
+ output.elasticsearch:
+ hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
+
+# Replicas being used for the kube-state-metrics metricbeat
+# deployment defined in templates/deployment.yaml
+replicas: 1
+
+# Extra environment variables to append to the DaemonSet pod spec.
+# This will be appended to the current 'env:' key. You can use any of the kubernetes env
+# syntax here
+extraEnvs: []
+# - name: MY_ENVIRONMENT_VAR
+# value: the_value_goes_here
+
+extraVolumeMounts: ""
+ # - name: extras
+ # mountPath: /usr/share/extras
+ # readOnly: true
+
+extraVolumes: ""
+ # - name: extras
+ # emptyDir: {}
+
+# Root directory where metricbeat will write data to in order to persist registry data across pod restarts (file position and other metadata).
+hostPathRoot: /var/lib
+
+image: "docker.elastic.co/beats/metricbeat"
+imageTag: "7.3.0"
+imagePullPolicy: "IfNotPresent"
+imagePullSecrets: []
+
+livenessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+
+readinessProbe:
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+
+# Whether this chart should self-manage its service account, role, and associated role binding.
+managedServiceAccount: true
+
+podAnnotations: {}
+ # iam.amazonaws.com/role: es-cluster
+
+# Various pod security context settings. Bear in mind that many of these have an impact on metricbeat functioning properly.
+#
+# - Filesystem group for the metricbeat user. The official elastic docker images always have an id of 1000.
+# - User that the container will execute as. Typically necessary to run as root (0) in order to properly collect host container logs.
+# - Whether to execute the metricbeat containers as privileged containers. Typically not necessarily unless running within environments such as OpenShift.
+podSecurityContext:
+ runAsUser: 0
+ privileged: false
+
+resources:
+ requests:
+ cpu: "100m"
+ memory: "100Mi"
+ limits:
+ cpu: "1000m"
+ memory: "200Mi"
+
+# Custom service account override that the pod will use
+serviceAccount: ""
+
+# A list of secrets and their paths to mount inside the pod
+# This is useful for mounting certificates for security other sensitive values
+secretMounts: []
+# - name: metricbeat-certificates
+# secretName: metricbeat-certificates
+# path: /usr/share/metricbeat/certs
+
+# How long to wait for metricbeat pods to stop gracefully
+terminationGracePeriod: 30
+
+tolerations: []
+
+nodeSelector: {}
+
+affinity: {}
+
+updateStrategy: RollingUpdate
+
+# Override various naming aspects of this chart
+# Only edit these if you know what you're doing
+nameOverride: ""
+fullnameOverride: ""