diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml
index 5522caa8c..2d1c9b6ba 100644
--- a/.github/workflows/push.yaml
+++ b/.github/workflows/push.yaml
@@ -35,6 +35,12 @@ jobs:
with:
command: package main/sentry --destination gh-pages/charts
+
+ - name: Build zips
+ uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c
+ with:
+ command: package main/clickhouse --destination gh-pages/charts
+
- name: Create index file
uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c
with:
diff --git a/clickhouse/.helmignore b/clickhouse/.helmignore
new file mode 100755
index 000000000..f0c131944
--- /dev/null
+++ b/clickhouse/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/clickhouse/Chart.yaml b/clickhouse/Chart.yaml
new file mode 100755
index 000000000..089730a3d
--- /dev/null
+++ b/clickhouse/Chart.yaml
@@ -0,0 +1,13 @@
+appVersion: "19.14"
+description: ClickHouse is an open source column-oriented database management system
+ capable of real time generation of analytical data reports using SQL queries
+home: https://clickhouse.yandex/
+icon: https://clickhouse.yandex/images/logo.png
+keywords:
+- clickhouse
+- olap
+- database
+name: clickhouse
+sources:
+- https://github.com/sentry-kubernetes/charts
+version: 1.0.0
diff --git a/clickhouse/README.md b/clickhouse/README.md
new file mode 100755
index 000000000..ec22a59f8
--- /dev/null
+++ b/clickhouse/README.md
@@ -0,0 +1,169 @@
+# ClickHouse
+
+[ClickHouse](https://clickhouse.yandex/) is an open source column-oriented database management system capable of real time generation of analytical data reports using SQL queries.
+
+## Introduction
+This chart bootstraps a [ClickHouse](https://clickhouse.yandex/) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.10+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `clickhouse`:
+
+```bash
+$ helm repo add liwenhe https://liwenhe1993.github.io/charts/
+$ helm repo update
+$ helm install --name clickhouse liwenhe/clickhouse
+```
+These commands deploy Clickhouse on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
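+Individual parameters from the [configuration](#configuration) section can also be overridden at install time with `--set`. For example (the values shown are illustrative, not chart defaults):
+
+```bash
+$ helm install --name clickhouse \
+    --set tabix.enabled=true \
+    --set clickhouse.persistentVolumeClaim.enabled=true \
+    --set clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled=true \
+    liwenhe/clickhouse
+```
+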
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `clickhouse` deployment:
+
+```bash
+$ helm delete --purge clickhouse
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Clickhouse chart and their default values.
+
+| Parameter | Description | Default |
+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `timezone` | Time zone used by the ClickHouse server | `Asia/Shanghai` |
+| `clusterDomain` | Kubernetes cluster domain | `cluster.local` |
+| `affinity` | Clickhouse Node selectors and tolerations for pod assignment | `nil` |
+| `clickhouse.podManagementPolicy` | Pod management policy; `Parallel` lets the StatefulSet controller relax its ordering guarantees while preserving uniqueness and identity guarantees | `Parallel` |
+| `clickhouse.updateStrategy` | StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete | `RollingUpdate` |
+| `clickhouse.rollingUpdatePartition` | Partition update strategy | `nil` |
+| `clickhouse.path` | The path to the directory containing data | `/var/lib/clickhouse` |
+| `clickhouse.http_port` | The port for connecting to the server over HTTP | `8123` |
+| `clickhouse.tcp_port` | Port for communicating with clients over the TCP protocol | `9000` |
+| `clickhouse.interserver_http_port` | Port for exchanging data between ClickHouse servers | `9009` |
+| `clickhouse.replicas` | The instance number of Clickhouse | `3` |
+| `clickhouse.image` | Docker image for Clickhouse | `yandex/clickhouse-server` |
+| `clickhouse.imageVersion` | Docker image version for Clickhouse | `19.14` |
+| `clickhouse.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
+| `clickhouse.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `clickhouse.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `clickhouse.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `clickhouse.livenessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
+| `clickhouse.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
+| `clickhouse.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
+| `clickhouse.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `clickhouse.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `clickhouse.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `clickhouse.readinessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
+| `clickhouse.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
+| `clickhouse.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
+| `clickhouse.persistentVolumeClaim.enabled` | Enable persistence using a `PersistentVolumeClaim` | `false` |
+| `clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` |
+| `clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` |
+| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storage` | Persistent Volume Size | `500Gi` |
+| `clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled` | Turn on and off logsPersistentVolume | `false` |
+| `clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` |
+| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storage` | Persistent Volume Size | `50Gi` |
+| `clickhouse.ingress.enabled` | Enable ingress | `false` |
+| `clickhouse.ingress.host` | Ingress host | `` |
+| `clickhouse.ingress.path` | Ingress path | `` |
+| `clickhouse.ingress.tls.enabled` | Enable ingress tls | `false` |
+| `clickhouse.ingress.tls.hosts` | Ingress tls hosts | `[]` |
+| `clickhouse.ingress.tls.secretName` | Ingress tls `secretName` | `` |
+| `clickhouse.configmap.enabled` | If `true`, generate custom `config.xml`, `metrica.xml` and `users.xml` from the values below | `true` |
+| `clickhouse.configmap.max_connections` | The maximum number of inbound connections | `4096` |
+| `clickhouse.configmap.keep_alive_timeout` | The number of seconds that ClickHouse waits for incoming requests before closing the connection | `3` |
+| `clickhouse.configmap.max_concurrent_queries` | The maximum number of simultaneously processed requests | `100` |
+| `clickhouse.configmap.uncompressed_cache_size` | Cache size (in bytes) for uncompressed data used by table engines from the MergeTree | `8589934592` |
+| `clickhouse.configmap.mark_cache_size` | Approximate size (in bytes) of the cache of "marks" used by MergeTree | `5368709120` |
+| `clickhouse.configmap.umask` | The umask applied by the server on startup; the number is always parsed as octal | `022` |
+| `clickhouse.configmap.mlock_executable` | Enabling this option is recommended but will lead to increased startup time for up to a few seconds | `false` |
+| `clickhouse.configmap.builtin_dictionaries_reload_interval` | The interval in seconds before reloading built-in dictionaries | `3600` |
+| `clickhouse.configmap.max_session_timeout` | Maximum session timeout, in seconds | `3600` |
+| `clickhouse.configmap.default_session_timeout` | Default session timeout, in seconds | `60` |
+| `clickhouse.configmap.disable_internal_dns_cache` | Disable ClickHouse internal DNS caching (`1` disables the cache) | `1` |
+| `clickhouse.configmap.max_open_files` | The maximum number of open files | `` |
+| `clickhouse.configmap.interserver_http_host` | The host name that can be used by other servers to access this server | `` |
+| `clickhouse.configmap.logger.path` | The log file path | `/var/log/clickhouse-server` |
+| `clickhouse.configmap.logger.level` | Logging level. Acceptable values: trace, debug, information, warning, error | `trace` |
+| `clickhouse.configmap.logger.size` | Size of the file | `1000M` |
+| `clickhouse.configmap.logger.count` | The number of archived log files that ClickHouse stores | `10` |
+| `clickhouse.configmap.compression.enabled` | Enable data compression settings | `false` |
+| `clickhouse.configmap.compression.cases[].min_part_size` | The minimum size of a table part | `10000000000` |
+| `clickhouse.configmap.compression.cases[].min_part_size_ratio` | The ratio of the minimum size of a table part to the full size of the table | `0.01` |
+| `clickhouse.configmap.compression.cases[].method` | Compression method. Acceptable values : lz4 or zstd(experimental) | `zstd` |
+| `clickhouse.configmap.zookeeper_servers.enabled` | Enable the settings that allow ClickHouse to interact with a ZooKeeper cluster | `false` |
+| `clickhouse.configmap.zookeeper_servers.session_timeout_ms` | Maximum timeout for the client session in milliseconds | `30000` |
+| `clickhouse.configmap.zookeeper_servers.operation_timeout_ms` | Operation timeout for the client session in milliseconds | `10000` |
+| `clickhouse.configmap.zookeeper_servers.root` | The znode that is used as the root for znodes used by the ClickHouse server. Optional | `` |
+| `clickhouse.configmap.zookeeper_servers.identity` | User and password, that can be required by ZooKeeper to give access to requested znodes. Optional | `` |
+| `clickhouse.configmap.zookeeper_servers.config[].index` | ZooKeeper index | `` |
+| `clickhouse.configmap.zookeeper_servers.config[].host` | ZooKeeper host | `` |
+| `clickhouse.configmap.zookeeper_servers.config[].port` | ZooKeeper port | `` |
+| `clickhouse.configmap.remote_servers.enabled` | Enable configuration of clusters used by the Distributed table engine | `true` |
+| `clickhouse.configmap.remote_servers.internal_replication` | If set to `true`, the table that data is written to is expected to replicate the data itself | `false` |
+| `clickhouse.configmap.remote_servers.replica.user` | Name of the user for connecting to a remote server. Access is configured in the users.xml file. | `default` |
+| `clickhouse.configmap.remote_servers.replica.password` | The password for connecting to a remote server (not masked). | `nil` |
+| `clickhouse.configmap.remote_servers.replica.compression` | Use data compression. | `true` |
+| `clickhouse.configmap.remote_servers.replica.backup.enabled` | Enable replica backup | `false` |
+| `clickhouse.configmap.remote_servers.graphite.enabled` | Enable graphite | `false` |
+| `clickhouse.configmap.remote_servers.graphite.config[].timeout` | The timeout for sending data, in seconds | `0.1` |
+| `clickhouse.configmap.remote_servers.graphite.config[].interval` | The interval for sending, in seconds | `60` |
+| `clickhouse.configmap.remote_servers.graphite.config[].root_path` | Prefix for keys | `one_min` |
+| `clickhouse.configmap.remote_servers.graphite.config[].metrics` | Sending data from the `system.metrics` table | `true` |
+| `clickhouse.configmap.remote_servers.graphite.config[].events` | Sending deltas data accumulated for the time period from the `system.events` table | `true` |
+| `clickhouse.configmap.remote_servers.graphite.config[].events_cumulative` | Sending cumulative data from the `system.events` table | `true` |
+| `clickhouse.configmap.remote_servers.graphite.config[].asynchronous_metrics` | Sending data from the `system.asynchronous_metrics` table | `true` |
+| `clickhouse.configmap.profiles.enabled` | Enable settings profiles | `false` |
+| `clickhouse.configmap.profiles.profile[].name` | The name of a settings profile | `` |
+| `clickhouse.configmap.profiles.profile[].config` | The config of a settings profile | `{}` |
+| `clickhouse.configmap.users.enabled` | Enable settings users | `false` |
+| `clickhouse.configmap.users.user[].name` | The name of a settings user | `` |
+| `clickhouse.configmap.users.user[].config` | The config of a settings user | `{}` |
+| `clickhouse.configmap.quotas.enabled` | Enable settings quotas | `false` |
+| `clickhouse.configmap.quotas.quota[].name` | The name of a settings quota | `` |
+| `clickhouse.configmap.quotas.quota[].config[]` | The config of a settings quota | `[]` |
+| `tabix.enabled` | Enable tabix | `false` |
+| `tabix.replicas` | The instance number of Tabix | `1` |
+| `tabix.updateStrategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate | `RollingUpdate` |
+| `tabix.updateStrategy.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `3` |
+| `tabix.updateStrategy.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `1` |
+| `tabix.image` | Docker image name | `spoonest/clickhouse-tabix-web-client` |
+| `tabix.imageVersion` | Docker image version | `stable` |
+| `tabix.imagePullPolicy` | Docker image pull policy | `IfNotPresent` |
+| `tabix.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `tabix.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `tabix.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `tabix.livenessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
+| `tabix.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
+| `tabix.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
+| `tabix.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `tabix.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `tabix.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `tabix.readinessProbe.timeoutSeconds` | Number of seconds after which the probe times out | `5` |
+| `tabix.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
+| `tabix.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
+| `tabix.security.user` | Tabix login username | `admin` |
+| `tabix.security.password` | Tabix login password | `admin` |
+| `tabix.automaticConnection.chName` | Automatic connection Clickhouse name | `` |
+| `tabix.automaticConnection.chHost` | Automatic connection Clickhouse host | `` |
+| `tabix.automaticConnection.chLogin` | Automatic connection Clickhouse login username | `` |
+| `tabix.automaticConnection.chPassword` | Automatic connection Clickhouse login password | `` |
+| `tabix.automaticConnection.chParams` | Automatic connection Clickhouse params | `` |
+| `tabix.ingress.enabled` | Enable ingress | `false` |
+| `tabix.ingress.host` | Ingress host | `` |
+| `tabix.ingress.path` | Ingress path | `` |
+| `tabix.ingress.tls.enabled` | Enable ingress tls | `false` |
+| `tabix.ingress.tls.hosts` | Ingress tls hosts | `[]` |
+
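+The same parameters can also be collected in a custom values file and passed to `helm install` with `-f`. A minimal sketch (the file name and every value below are examples, not chart defaults):
+
+```yaml
+# values-override.yaml (example file name)
+clickhouse:
+  replicas: "2"
+  persistentVolumeClaim:
+    enabled: true
+    dataPersistentVolume:
+      enabled: true
+      storage: "100Gi"
+  configmap:
+    zookeeper_servers:
+      enabled: true
+      config:
+        - index: "1"
+          host: "zookeeper.default.svc.cluster.local"
+          port: "2181"
+tabix:
+  enabled: true
+```
+
+```bash
+$ helm install --name clickhouse -f values-override.yaml liwenhe/clickhouse
+```
+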
+For more information please refer to the [liwenhe1993/charts](https://github.com/liwenhe1993/charts.git) documentation.
diff --git a/clickhouse/templates/NOTES.txt b/clickhouse/templates/NOTES.txt
new file mode 100755
index 000000000..f8a6dd147
--- /dev/null
+++ b/clickhouse/templates/NOTES.txt
@@ -0,0 +1,31 @@
+** Please be patient while the chart is being deployed **
+
+1. Get the Clickhouse URL by running:
+
+{{- if .Values.clickhouse.ingress.enabled }}
+
+ export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
+ echo "Clickhouse URL: http://$HOSTNAME/"
+
+{{- else }}
+
+  echo "Clickhouse HTTP URL   : http://127.0.0.1:8123/"
+  echo "Clickhouse native TCP : 127.0.0.1:9000"
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 8123:{{ .Values.clickhouse.http_port }}
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9000:{{ .Values.clickhouse.tcp_port }}
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9009:{{ .Values.clickhouse.interserver_http_port }}
+
+{{- end }}
+
+2. Get the Tabix URL by running:
+
+{{- if .Values.tabix.ingress.enabled }}
+
+ export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }}-tabix -o jsonpath='{.spec.rules[0].host}')
+ echo "Tabix URL: http://$HOSTNAME/"
+
+{{- else }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }}-tabix 80
+
+{{- end }}
diff --git a/clickhouse/templates/_helpers.tpl b/clickhouse/templates/_helpers.tpl
new file mode 100755
index 000000000..e6690cdf7
--- /dev/null
+++ b/clickhouse/templates/_helpers.tpl
@@ -0,0 +1,56 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "clickhouse.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "clickhouse.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "clickhouse.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create clickhouse path.
+If .Values.clickhouse.path is empty, the default value "/var/lib/clickhouse" is used.
+*/}}
+{{- define "clickhouse.fullpath" -}}
+{{- if .Values.clickhouse.path -}}
+{{- .Values.clickhouse.path | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s" "/var/lib/clickhouse" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create clickhouse log path.
+If .Values.clickhouse.configmap.logger.path is empty, the default value "/var/log/clickhouse-server" is used.
+*/}}
+{{- define "clickhouse.logpath" -}}
+{{- if .Values.clickhouse.configmap.logger.path -}}
+{{- .Values.clickhouse.configmap.logger.path | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s" "/var/log/clickhouse-server" -}}
+{{- end -}}
+{{- end -}}
diff --git a/clickhouse/templates/configmap-config.yaml b/clickhouse/templates/configmap-config.yaml
new file mode 100755
index 000000000..3bfae41e0
--- /dev/null
+++ b/clickhouse/templates/configmap-config.yaml
@@ -0,0 +1,112 @@
+{{- if .Values.clickhouse.configmap.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-config
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-config
+ app.kubernetes.io/instance: {{ .Release.Name }}-config
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ config.xml: |-
+    <?xml version="1.0"?>
+    <yandex>
+        <path>{{ include "clickhouse.fullpath" . }}/</path>
+        <tmp_path>{{ printf "%s/tmp/" (include "clickhouse.fullpath" .) }}</tmp_path>
+        <user_files_path>{{ printf "%s/user_files/" (include "clickhouse.fullpath" .) }}</user_files_path>
+        <format_schema_path>{{ printf "%s/format_schemas/" (include "clickhouse.fullpath" .) }}</format_schema_path>
+
+        <include_from>/etc/clickhouse-server/metrica.d/metrica.xml</include_from>
+
+        <users_config>users.xml</users_config>
+
+        <display_name>{{ template "clickhouse.fullname" . }}</display_name>
+        <listen_host>0.0.0.0</listen_host>
+        <http_port>{{ .Values.clickhouse.http_port | default "8123" }}</http_port>
+        <tcp_port>{{ .Values.clickhouse.tcp_port | default "9000" }}</tcp_port>
+        <interserver_http_port>{{ .Values.clickhouse.interserver_http_port | default "9009" }}</interserver_http_port>
+        <max_connections>{{ .Values.clickhouse.configmap.max_connections | default "4096" }}</max_connections>
+        <keep_alive_timeout>{{ .Values.clickhouse.configmap.keep_alive_timeout | default "3" }}</keep_alive_timeout>
+        <max_concurrent_queries>{{ .Values.clickhouse.configmap.max_concurrent_queries | default "100" }}</max_concurrent_queries>
+        <uncompressed_cache_size>{{ .Values.clickhouse.configmap.uncompressed_cache_size | default "8589934592" }}</uncompressed_cache_size>
+        <mark_cache_size>{{ .Values.clickhouse.configmap.mark_cache_size | default "5368709120" }}</mark_cache_size>
+        <timezone>{{ .Values.timezone | default "Asia/Shanghai" }}</timezone>
+        <umask>{{ .Values.clickhouse.configmap.umask | default "027" }}</umask>
+        <mlock_executable>{{ .Values.clickhouse.configmap.mlock_executable | default "false" }}</mlock_executable>
+
+        <remote_servers incl="clickhouse_remote_servers" optional="true" />
+        <zookeeper incl="zookeeper-servers" optional="true" />
+        <builtin_dictionaries_reload_interval>{{ .Values.clickhouse.configmap.builtin_dictionaries_reload_interval | default "3600" }}</builtin_dictionaries_reload_interval>
+        <max_session_timeout>{{ .Values.clickhouse.configmap.max_session_timeout | default "3600" }}</max_session_timeout>
+        <default_session_timeout>{{ .Values.clickhouse.configmap.default_session_timeout | default "60" }}</default_session_timeout>
+        <disable_internal_dns_cache>{{ .Values.clickhouse.configmap.disable_internal_dns_cache | default "1" }}</disable_internal_dns_cache>
+
+        <query_log>
+            <database>system</database>
+            <table>query_log</table>
+            <partition_by>toYYYYMM(event_date)</partition_by>
+            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        </query_log>
+
+        <query_thread_log>
+            <database>system</database>
+            <table>query_thread_log</table>
+            <partition_by>toYYYYMM(event_date)</partition_by>
+            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        </query_thread_log>
+
+        <distributed_ddl>
+            <path>/clickhouse/task_queue/ddl</path>
+        </distributed_ddl>
+
+    {{- if .Values.clickhouse.configmap.logger }}
+        <logger>
+            <level>{{ .Values.clickhouse.configmap.logger.level | default "trace" }}</level>
+            <log>{{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.log" }}</log>
+            <errorlog>{{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.err.log" }}</errorlog>
+            <size>{{ .Values.clickhouse.configmap.logger.size | default "1000M" }}</size>
+            <count>{{ .Values.clickhouse.configmap.logger.count | default "10" }}</count>
+        </logger>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.compression.enabled }}
+        <compression>
+        {{- range .Values.clickhouse.configmap.compression.cases }}
+        {{- with . }}
+            <case>
+                <min_part_size>{{ .min_part_size }}</min_part_size>
+                <min_part_size_ratio>{{ .min_part_size_ratio }}</min_part_size_ratio>
+                <method>{{ .method }}</method>
+            </case>
+        {{- end }}
+        {{- end }}
+        </compression>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.graphite.enabled }}
+    {{- range .Values.clickhouse.configmap.graphite.config }}
+    {{- with . }}
+        <graphite>
+            <host>{{ template "clickhouse.fullname" $ }}-graphite.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}</host>
+            <port>{{ $.Values.clickhouse.graphite.service.port }}</port>
+            <timeout>{{ .timeout | default "0.1" }}</timeout>
+            <interval>{{ .interval | default "60" }}</interval>
+            <root_path>{{ .root_path | default "one_min" }}</root_path>
+            <metrics>{{ .metrics | default "true" }}</metrics>
+            <events>{{ .events | default "true" }}</events>
+            <events_cumulative>{{ .events_cumulative | default "true" }}</events_cumulative>
+            <asynchronous_metrics>{{ .asynchronous_metrics | default "true" }}</asynchronous_metrics>
+        </graphite>
+    {{- end }}
+    {{- end }}
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.max_open_files }}
+        <max_open_files>{{ .Values.clickhouse.configmap.max_open_files }}</max_open_files>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.interserver_http_host }}
+        <interserver_http_host>{{ .Values.clickhouse.configmap.interserver_http_host }}</interserver_http_host>
+    {{- end }}
+    </yandex>
+{{- end }}
diff --git a/clickhouse/templates/configmap-metrika.yaml b/clickhouse/templates/configmap-metrika.yaml
new file mode 100755
index 000000000..2d14bc999
--- /dev/null
+++ b/clickhouse/templates/configmap-metrika.yaml
@@ -0,0 +1,77 @@
+{{- if .Values.clickhouse.configmap.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-metrica
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-metrica
+ app.kubernetes.io/instance: {{ .Release.Name }}-metrica
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ metrica.xml: |-
+    <?xml version="1.0"?>
+    <yandex>
+    {{- if .Values.clickhouse.configmap.zookeeper_servers.enabled }}
+        <zookeeper-servers>
+        {{- range .Values.clickhouse.configmap.zookeeper_servers.config }}
+        {{- with . }}
+            <node index="{{ .index }}">
+                <host>{{ .host }}</host>
+                <port>{{ .port }}</port>
+            </node>
+        {{- end }}
+        {{- end }}
+            <session_timeout_ms>{{ .Values.clickhouse.configmap.zookeeper_servers.session_timeout_ms }}</session_timeout_ms>
+            <operation_timeout_ms>{{ .Values.clickhouse.configmap.zookeeper_servers.operation_timeout_ms }}</operation_timeout_ms>
+            <root>{{ .Values.clickhouse.configmap.zookeeper_servers.root }}</root>
+            <identity>{{ .Values.clickhouse.configmap.zookeeper_servers.identity }}</identity>
+        </zookeeper-servers>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.remote_servers.enabled }}
+        <clickhouse_remote_servers>
+        <{{ include "clickhouse.fullname" . }}>
+        {{- range untilStep 0 (int .Values.clickhouse.replicas) 1 }}
+            <shard>
+                <replica>
+                    <internal_replication>{{ $.Values.clickhouse.configmap.remote_servers.internal_replication | default "false" }}</internal_replication>
+                    <host>{{ include "clickhouse.fullname" $ }}-{{ . }}.{{ include "clickhouse.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}</host>
+                    <port>{{ $.Values.clickhouse.tcp_port}}</port>
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }}
+                    <user>{{ $.Values.clickhouse.configmap.remote_servers.replica.user }}</user>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }}
+                    <password>{{ $.Values.clickhouse.configmap.remote_servers.replica.password }}</password>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }}
+                    <secure>{{ $.Values.clickhouse.configmap.remote_servers.replica.secure }}</secure>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }}
+                    <compression>{{ $.Values.clickhouse.configmap.remote_servers.replica.compression }}</compression>
+                    {{- end }}
+                </replica>
+                {{- if $.Values.clickhouse.configmap.remote_servers.replica.backup.enabled }}
+                <replica>
+                    <host>{{ include "clickhouse.fullname" $ }}-replica-{{ . }}.{{ include "clickhouse.fullname" $ }}-replica-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}</host>
+                    <port>{{ $.Values.clickhouse.tcp_port}}</port>
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }}
+                    <user>{{ $.Values.clickhouse.configmap.remote_servers.replica.user }}</user>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }}
+                    <password>{{ $.Values.clickhouse.configmap.remote_servers.replica.password }}</password>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }}
+                    <secure>{{ $.Values.clickhouse.configmap.remote_servers.replica.secure }}</secure>
+                    {{- end }}
+                    {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }}
+                    <compression>{{ $.Values.clickhouse.configmap.remote_servers.replica.compression }}</compression>
+                    {{- end }}
+                </replica>
+                {{- end }}
+            </shard>
+        {{- end }}
+        </{{ include "clickhouse.fullname" . }}>
+        </clickhouse_remote_servers>
+    {{- end }}
+    </yandex>
+{{- end }}
diff --git a/clickhouse/templates/configmap-users.yaml b/clickhouse/templates/configmap-users.yaml
new file mode 100755
index 000000000..99dbdc3c2
--- /dev/null
+++ b/clickhouse/templates/configmap-users.yaml
@@ -0,0 +1,68 @@
+{{- if .Values.clickhouse.configmap.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-users
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-users
+ app.kubernetes.io/instance: {{ .Release.Name }}-users
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ users.xml: |-
+    <?xml version="1.0"?>
+    <yandex>
+    {{- if .Values.clickhouse.configmap.profiles.enabled }}
+        <profiles>
+        {{- range .Values.clickhouse.configmap.profiles.profile }}
+        {{- with . }}
+            <{{ .name }}>
+            {{- range $k_1, $v_1 := .config }}
+                <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
+            {{- end }}
+            </{{ .name }}>
+        {{- end }}
+        {{- end }}
+        </profiles>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.users.enabled }}
+        <users>
+        {{- range $key, $value := .Values.clickhouse.configmap.users.user }}
+        {{- with . }}
+            <{{ .name }}>
+            {{- range $k_1, $v_1 := .config }}
+            {{- if (eq "networks" $k_1) }}
+                <networks>
+                {{- range $v_1 }}
+                {{- with .}}
+                    <ip>{{ . }}</ip>
+                {{- end }}
+                {{- end }}
+                </networks>
+            {{- else }}
+                <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
+            {{- end }}
+            {{- end }}
+            </{{ .name }}>
+        {{- end }}
+        {{- end }}
+        </users>
+    {{- end }}
+
+    {{- if .Values.clickhouse.configmap.quotas.enabled }}
+        <quotas>
+        {{- range $key, $value := .Values.clickhouse.configmap.quotas.quota }}
+        {{- with . }}
+            <{{ .name }}>
+            {{- range $val := .config }}
+            {{- range $k_1, $v_1 := $val }}
+                <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
+            {{- end }}
+            {{- end }}
+            </{{ .name }}>
+        {{- end }}
+        {{- end }}
+        </quotas>
+    {{- end }}
+    </yandex>
+{{- end }}
diff --git a/clickhouse/templates/deployment-tabix.yaml b/clickhouse/templates/deployment-tabix.yaml
new file mode 100755
index 000000000..e3c9e453e
--- /dev/null
+++ b/clickhouse/templates/deployment-tabix.yaml
@@ -0,0 +1,85 @@
+{{- if .Values.tabix.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-tabix
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.tabix.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+{{- if .Values.tabix.updateStrategy }}
+ strategy:
+ type: {{ .Values.tabix.updateStrategy.type }}
+ rollingUpdate:
+ maxSurge: {{ .Values.tabix.updateStrategy.maxSurge }}
+ maxUnavailable: {{ .Values.tabix.updateStrategy.maxUnavailable }}
+{{- end }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.tabix.imagePullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.tabix.imagePullSecrets }}
+ - name: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: {{ include "clickhouse.name" . }}-tabix
+ image: {{ .Values.tabix.image }}:{{ .Values.tabix.imageVersion }}
+ imagePullPolicy: {{ .Values.tabix.imagePullPolicy }}
+ ports:
+ - name: http
+ containerPort: 80
+ env:
+ {{- if .Values.tabix.security }}
+ - name: USER
+ value: {{ .Values.tabix.security.user }}
+ - name: PASSWORD
+ value: {{ .Values.tabix.security.password }}
+ {{- end }}
+ {{- if .Values.tabix.automaticConnection }}
+ - name: CH_NAME
+ value: {{ .Values.tabix.automaticConnection.chName }}
+ - name: CH_HOST
+ value: {{ .Values.tabix.automaticConnection.chHost }}
+ - name: CH_LOGIN
+ value: {{ .Values.tabix.automaticConnection.chLogin }}
+ - name: CH_PASSWORD
+ value: {{ .Values.tabix.automaticConnection.chPassword }}
+ - name: CH_PARAMS
+ value: {{ .Values.tabix.automaticConnection.chParams }}
+ {{- end }}
+ {{- if .Values.tabix.livenessProbe.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: {{ .Values.tabix.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.tabix.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.tabix.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.tabix.livenessProbe.failureThreshold }}
+ successThreshold: {{ .Values.tabix.livenessProbe.successThreshold }}
+ {{- end }}
+ {{- if .Values.tabix.readinessProbe.enabled }}
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: {{ .Values.tabix.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.tabix.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.tabix.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.tabix.readinessProbe.failureThreshold }}
+ successThreshold: {{ .Values.tabix.readinessProbe.successThreshold }}
+ {{- end }}
+{{- end }}
diff --git a/clickhouse/templates/ingress-clickhouse.yaml b/clickhouse/templates/ingress-clickhouse.yaml
new file mode 100755
index 000000000..a4a672b79
--- /dev/null
+++ b/clickhouse/templates/ingress-clickhouse.yaml
@@ -0,0 +1,27 @@
+{{- if .Values.clickhouse.ingress.enabled}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ include "clickhouse.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ rules:
+    - host: {{ .Values.clickhouse.ingress.host }}
+      http:
+        paths:
+          - path: {{ .Values.clickhouse.ingress.path }}
+            backend:
+              serviceName: {{ include "clickhouse.fullname" . }}
+              servicePort: http
+{{- if .Values.clickhouse.ingress.tls.enabled }}
+ tls:
+ hosts:
+ {{- range .Values.clickhouse.ingress.tls.hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .Values.clickhouse.ingress.tls.secretName }}
+{{- end }}
+{{- end }}
diff --git a/clickhouse/templates/ingress-tabix.yaml b/clickhouse/templates/ingress-tabix.yaml
new file mode 100755
index 000000000..3b85c886f
--- /dev/null
+++ b/clickhouse/templates/ingress-tabix.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.tabix.enabled }}
+{{- if .Values.tabix.ingress.enabled}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-tabix
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ rules:
+    - host: {{ .Values.tabix.ingress.host }}
+      http:
+        paths:
+          - path: {{ .Values.tabix.ingress.path }}
+            backend:
+              serviceName: {{ include "clickhouse.fullname" . }}-tabix
+              servicePort: http
+{{- if .Values.tabix.ingress.tls.enabled }}
+ tls:
+ hosts:
+ {{- range .Values.tabix.ingress.tls.hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .Values.tabix.ingress.tls.secretName }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/clickhouse/templates/statefulset-clickhouse-replica.yaml b/clickhouse/templates/statefulset-clickhouse-replica.yaml
new file mode 100755
index 000000000..9b1db994f
--- /dev/null
+++ b/clickhouse/templates/statefulset-clickhouse-replica.yaml
@@ -0,0 +1,184 @@
+{{- if .Values.clickhouse.configmap.remote_servers.replica.backup.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-replica
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.clickhouse.replicas }}
+ podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }}
+ updateStrategy:
+ type: {{ .Values.clickhouse.updateStrategy }}
+ {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }}
+ rollingUpdate: null
+ {{- else if .Values.clickhouse.rollingUpdatePartition }}
+ rollingUpdate:
+ partition: {{ .Values.clickhouse.rollingUpdatePartition }}
+ {{- end }}
+ serviceName: {{ include "clickhouse.fullname" . }}-replica-headless
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.clickhouse.imagePullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.clickhouse.imagePullSecrets }}
+ - name: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ - name: init
+ image: busybox:1.31.0
+ imagePullPolicy: IfNotPresent
+ args:
+ - /bin/sh
+ - -c
+ - |
+ mkdir -p /etc/clickhouse-server/metrica.d
+ containers:
+ - name: {{ include "clickhouse.fullname" . }}-replica
+ image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }}
+ imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }}
+ ports:
+ - name: http-port
+ containerPort: {{ .Values.clickhouse.http_port | default "8123" }}
+ - name: tcp-port
+ containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }}
+ - name: inter-http-port
+ containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }}
+ {{- if .Values.clickhouse.livenessProbe.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: {{ .Values.clickhouse.tcp_port }}
+ initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }}
+ successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }}
+ {{- end }}
+ {{- if .Values.clickhouse.readinessProbe.enabled }}
+ readinessProbe:
+ tcpSocket:
+ port: {{ .Values.clickhouse.tcp_port }}
+ initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }}
+ successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }}
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "clickhouse.fullname" . }}-replica-data
+ mountPath: {{ include "clickhouse.fullpath" . }}
+ - name: {{ include "clickhouse.fullname" . }}-replica-logs
+ mountPath: {{ include "clickhouse.logpath" . }}
+ - name: {{ include "clickhouse.fullname" . }}-config
+ mountPath: /etc/clickhouse-server/config.d
+ - name: {{ include "clickhouse.fullname" . }}-metrica
+ mountPath: /etc/clickhouse-server/metrica.d
+ - name: {{ include "clickhouse.fullname" . }}-users
+ mountPath: /etc/clickhouse-server/users.d
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- if .Values.clickhouse.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.clickhouse.imagePullSecrets }}
+ {{- end }}
+ {{- if .Values.clickhouse.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "clickhouse.fullname" . }}-replica-data
+ {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "clickhouse.fullname" . }}-replica-data
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: {{ include "clickhouse.fullname" . }}-replica-logs
+ {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "clickhouse.fullname" . }}-replica-logs
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.clickhouse.configmap.enabled }}
+ - name: {{ include "clickhouse.fullname" . }}-config
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-config
+ items:
+ - key: config.xml
+ path: config.xml
+ - name: {{ include "clickhouse.fullname" . }}-metrica
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-metrica
+ items:
+ - key: metrica.xml
+ path: metrica.xml
+ - name: {{ include "clickhouse.fullname" . }}-users
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-users
+ items:
+ - key: users.xml
+ path: users.xml
+ {{- end }}
+{{- if .Values.clickhouse.persistentVolumeClaim.enabled }}
+ volumeClaimTemplates:
+{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "clickhouse.fullname" . }}-replica-data
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-data
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica-data
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage }}
+{{- end }}
+{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "clickhouse.fullname" . }}-replica-logs
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-logs
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica-logs
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/clickhouse/templates/statefulset-clickhouse.yaml b/clickhouse/templates/statefulset-clickhouse.yaml
new file mode 100755
index 000000000..ec8712749
--- /dev/null
+++ b/clickhouse/templates/statefulset-clickhouse.yaml
@@ -0,0 +1,182 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "clickhouse.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: {{ .Values.clickhouse.replicas }}
+ podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }}
+ updateStrategy:
+ type: {{ .Values.clickhouse.updateStrategy }}
+ {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }}
+ rollingUpdate: null
+ {{- else if .Values.clickhouse.rollingUpdatePartition }}
+ rollingUpdate:
+ partition: {{ .Values.clickhouse.rollingUpdatePartition }}
+ {{- end }}
+ serviceName: {{ include "clickhouse.fullname" . }}-headless
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.clickhouse.imagePullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.clickhouse.imagePullSecrets }}
+ - name: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ initContainers:
+ - name: init
+ image: busybox:1.31.0
+ imagePullPolicy: IfNotPresent
+ args:
+ - /bin/sh
+ - -c
+ - |
+ mkdir -p /etc/clickhouse-server/metrica.d
+ containers:
+ - name: {{ include "clickhouse.fullname" . }}
+ image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }}
+ imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }}
+ ports:
+ - name: http-port
+ containerPort: {{ .Values.clickhouse.http_port | default "8123" }}
+ - name: tcp-port
+ containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }}
+ - name: inter-http-port
+ containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }}
+ {{- if .Values.clickhouse.livenessProbe.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: {{ .Values.clickhouse.tcp_port }}
+ initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }}
+ successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }}
+ {{- end }}
+ {{- if .Values.clickhouse.readinessProbe.enabled }}
+ readinessProbe:
+ tcpSocket:
+ port: {{ .Values.clickhouse.tcp_port }}
+ initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }}
+ successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }}
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "clickhouse.fullname" . }}-data
+ mountPath: {{ include "clickhouse.fullpath" . }}
+ - name: {{ include "clickhouse.fullname" . }}-logs
+ mountPath: {{ include "clickhouse.logpath" . }}
+ - name: {{ include "clickhouse.fullname" . }}-config
+ mountPath: /etc/clickhouse-server/config.d
+ - name: {{ include "clickhouse.fullname" . }}-metrica
+ mountPath: /etc/clickhouse-server/metrica.d
+ - name: {{ include "clickhouse.fullname" . }}-users
+ mountPath: /etc/clickhouse-server/users.d
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- if .Values.clickhouse.imagePullSecrets }}
+ imagePullSecrets:
+ - name: {{ .Values.clickhouse.imagePullSecrets }}
+ {{- end }}
+ {{- if .Values.clickhouse.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "clickhouse.fullname" . }}-data
+ {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "clickhouse.fullname" . }}-data
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: {{ include "clickhouse.fullname" . }}-logs
+ {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "clickhouse.fullname" . }}-logs
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.clickhouse.configmap.enabled }}
+ - name: {{ include "clickhouse.fullname" . }}-config
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-config
+ items:
+ - key: config.xml
+ path: config.xml
+ - name: {{ include "clickhouse.fullname" . }}-metrica
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-metrica
+ items:
+ - key: metrica.xml
+ path: metrica.xml
+ - name: {{ include "clickhouse.fullname" . }}-users
+ configMap:
+ name: {{ include "clickhouse.fullname" . }}-users
+ items:
+ - key: users.xml
+ path: users.xml
+ {{- end }}
+{{- if .Values.clickhouse.persistentVolumeClaim.enabled }}
+ volumeClaimTemplates:
+{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "clickhouse.fullname" . }}-data
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-data
+ app.kubernetes.io/instance: {{ .Release.Name }}-data
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
+{{- end }}
+{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "clickhouse.fullname" . }}-logs
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-logs
+ app.kubernetes.io/instance: {{ .Release.Name }}-logs
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }}
+ storageClassName: ""
+ {{- else }}
+ storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
+{{- end }}
+{{- end }}
diff --git a/clickhouse/templates/svc-clickhouse-headless.yaml b/clickhouse/templates/svc-clickhouse-headless.yaml
new file mode 100755
index 000000000..980c27730
--- /dev/null
+++ b/clickhouse/templates/svc-clickhouse-headless.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-headless
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-headless
+ app.kubernetes.io/instance: {{ .Release.Name }}-headless
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ clusterIP: "None"
+ ports:
+ - port: {{ .Values.clickhouse.tcp_port }}
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ - port: {{ .Values.clickhouse.http_port }}
+ targetPort: http-port
+ protocol: TCP
+ name: http-port
+ - port: {{ .Values.clickhouse.interserver_http_port }}
+ targetPort: inter-http-port
+ protocol: TCP
+ name: inter-http-port
+ selector:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/clickhouse/templates/svc-clickhouse-replica-headless.yaml b/clickhouse/templates/svc-clickhouse-replica-headless.yaml
new file mode 100755
index 000000000..b26448d99
--- /dev/null
+++ b/clickhouse/templates/svc-clickhouse-replica-headless.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-replica-headless
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-headless
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica-headless
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ clusterIP: "None"
+ ports:
+ - port: {{ .Values.clickhouse.tcp_port }}
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ - port: {{ .Values.clickhouse.http_port }}
+ targetPort: http-port
+ protocol: TCP
+ name: http-port
+ - port: {{ .Values.clickhouse.interserver_http_port }}
+ targetPort: inter-http-port
+ protocol: TCP
+ name: inter-http-port
+ selector:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
diff --git a/clickhouse/templates/svc-clickhouse-replica.yaml b/clickhouse/templates/svc-clickhouse-replica.yaml
new file mode 100755
index 000000000..b3fd7eef5
--- /dev/null
+++ b/clickhouse/templates/svc-clickhouse-replica.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-replica
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: {{ .Values.clickhouse.tcp_port }}
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ - port: {{ .Values.clickhouse.http_port }}
+ targetPort: http-port
+ protocol: TCP
+ name: http-port
+ - port: {{ .Values.clickhouse.interserver_http_port }}
+ targetPort: inter-http-port
+ protocol: TCP
+ name: inter-http-port
+ selector:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
+ app.kubernetes.io/instance: {{ .Release.Name }}-replica
diff --git a/clickhouse/templates/svc-clickhouse.yaml b/clickhouse/templates/svc-clickhouse.yaml
new file mode 100755
index 000000000..b73c81a52
--- /dev/null
+++ b/clickhouse/templates/svc-clickhouse.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "clickhouse.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: {{ .Values.clickhouse.tcp_port }}
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ - port: {{ .Values.clickhouse.http_port }}
+ targetPort: http-port
+ protocol: TCP
+ name: http-port
+ - port: {{ .Values.clickhouse.interserver_http_port }}
+ targetPort: inter-http-port
+ protocol: TCP
+ name: inter-http-port
+ selector:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/clickhouse/templates/svc-tabix.yaml b/clickhouse/templates/svc-tabix.yaml
new file mode 100755
index 000000000..56df5caa2
--- /dev/null
+++ b/clickhouse/templates/svc-tabix.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.tabix.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "clickhouse.fullname" . }}-tabix
+ labels:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: 80
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
+ app.kubernetes.io/instance: {{ .Release.Name }}-tabix
+{{- end }}
diff --git a/clickhouse/values.yaml b/clickhouse/values.yaml
new file mode 100755
index 000000000..0c8ac2208
--- /dev/null
+++ b/clickhouse/values.yaml
@@ -0,0 +1,372 @@
+## Timezone
+timezone: "Asia/Shanghai"
+
+## Cluster domain
+clusterDomain: "cluster.local"
+
+##
+## Clickhouse Node selectors and tolerations for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+##
+# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
+# tolerations: []
+## Clickhouse pod/node affinity/anti-affinity
+##
+#affinity:
+# nodeAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# nodeSelectorTerms:
+# - matchExpressions:
+# - key: "application/clickhouse"
+# operator: In
+# values:
+# - "true"
+
+clickhouse:
+  ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+ ##
+ podManagementPolicy: "Parallel"
+
+ ## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+ ##
+ updateStrategy: "RollingUpdate"
+
+ ## Partition update strategy
+ ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+ ##
+ # rollingUpdatePartition:
+
+ ##
+ ## The path to the directory containing data.
+ ## Default value: /var/lib/clickhouse
+ path: "/var/lib/clickhouse"
+ ##
+ ## The port for connecting to the server over HTTP
+ http_port: "8123"
+ ##
+ ## Port for communicating with clients over the TCP protocol.
+ tcp_port: "9000"
+ ##
+ ## Port for exchanging data between ClickHouse servers.
+ interserver_http_port: "9009"
+ ##
+ ## The instance number of Clickhouse
+ replicas: "3"
+ ## Clickhouse image configuration.
+ image: "yandex/clickhouse-server"
+ imageVersion: "19.14"
+ imagePullPolicy: "IfNotPresent"
+ #imagePullSecrets:
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ ## Clickhouse data volume
+ dataPersistentVolume:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storage: "500Gi"
+ ## Clickhouse logs volume
+ logsPersistentVolume:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storage: "50Gi"
+ ##
+ ## An API object that manages external access to the services in a cluster, typically HTTP.
+ ## Ingress can provide load balancing, SSL termination and name-based virtual hosting.
+ ingress:
+ enabled: false
+ # host: "clickhouse.domain.com"
+ # path: "/"
+ # tls:
+ # enabled: false
+ # hosts:
+ # - "clickhouse.domain.com"
+ # - "clickhouse.domain1.com"
+ # secretName: "clickhouse-secret"
+ ##
+ ## Clickhouse config.xml and metrica.xml
+ configmap:
+ ##
+    ## If `enabled` is `true`, custom config.xml and metrica.xml files are generated from the values below.
+ enabled: true
+ ##
+ ## The maximum number of inbound connections.
+ max_connections: "4096"
+ ##
+ ## The number of seconds that ClickHouse waits for incoming requests before closing the connection.
+ keep_alive_timeout: "3"
+ ##
+ ## The maximum number of simultaneously processed requests.
+ max_concurrent_queries: "100"
+ ##
+ ## Cache size (in bytes) for uncompressed data used by table engines from the MergeTree.
+ ## There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option use_uncompressed_cache is enabled.
+ ## The uncompressed cache is advantageous for very short queries in individual cases.
+ uncompressed_cache_size: "8589934592"
+ ##
+ ## Approximate size (in bytes) of the cache of "marks" used by MergeTree.
+ ## The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120.
+ mark_cache_size: "5368709120"
+ ##
+ ## You can specify umask here (see "man umask"). Server will apply it on startup.
+ ## Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
+ umask: "022"
+ ##
+    ## Perform mlockall after startup to lower first-query latency and to prevent the clickhouse executable from being paged out under high IO load.
+ ## Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
+ mlock_executable: false
+ ##
+ ## The interval in seconds before reloading built-in dictionaries.
+ ## ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries "on the fly" without restarting the server.
+ builtin_dictionaries_reload_interval: "3600"
+ ##
+ ## Maximum session timeout, in seconds.
+ max_session_timeout: "3600"
+ ##
+ ## Default session timeout, in seconds.
+ default_session_timeout: "60"
+ ##
+    ## Set to 1 to disable ClickHouse internal DNS caching.
+ disable_internal_dns_cache: "1"
+ ##
+ ## The maximum number of open files.
+ ## We recommend using this option in Mac OS X, since the getrlimit() function returns an incorrect value.
+ #max_open_files:
+ ##
+ ## The host name that can be used by other servers to access this server.
+    ## If omitted, it is defined in the same way as the hostname -f command.
+ ## Useful for breaking away from a specific network interface.
+ #interserver_http_host:
+ ##
+ ## Logging settings.
+ # path – The log path. Default value: /var/log/clickhouse-server.
+    # level – Logging level. Acceptable values: trace, debug, information, warning, error. Default value: trace.
+    # size – Size of the file. Applies to log and errorlog. Once the file reaches size, ClickHouse archives and renames it, and creates a new log file in its place.
+ # count – The number of archived log files that ClickHouse stores.
+ logger:
+ path: "/var/log/clickhouse-server"
+ level: "trace"
+ size: "1000M"
+ count: "10"
+ ##
+ ## Data compression settings.
+ # min_part_size – The minimum size of a table part.
+ # min_part_size_ratio – The ratio of the minimum size of a table part to the full size of the table.
+    # method – Compression method. Acceptable values: lz4 or zstd (experimental).
+ compression:
+ enabled: false
+ cases:
+ - min_part_size: "10000000000"
+ min_part_size_ratio: "0.01"
+ method: "zstd"
+ ##
+ ## Contains settings that allow ClickHouse to interact with a ZooKeeper cluster.
+ ## ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.
+ # node — ZooKeeper endpoint. You can set multiple endpoints.
+ # session_timeout — Maximum timeout for the client session in milliseconds.
+ # root — The znode that is used as the root for znodes used by the ClickHouse server. Optional.
+ # identity — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
+ zookeeper_servers:
+ enabled: false
+ session_timeout_ms: "30000"
+ operation_timeout_ms: "10000"
+ #root: "/path/to/zookeeper/node"
+ #identity: "user:password"
+ config:
+ - index: ""
+ host: ""
+ port: ""
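+    ##
+    ## For example, a three-node ZooKeeper ensemble could be referenced like this
+    ## (the hostnames and port below are assumptions about your ZooKeeper deployment):
+    # zookeeper_servers:
+    #   enabled: true
+    #   config:
+    #   - index: "1"
+    #     host: "zookeeper-0.zookeeper-headless"
+    #     port: "2181"
+    #   - index: "2"
+    #     host: "zookeeper-1.zookeeper-headless"
+    #     port: "2181"
+    #   - index: "3"
+    #     host: "zookeeper-2.zookeeper-headless"
+    #     port: "2181"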
+ ##
+ ## Configuration of clusters used by the Distributed table engine.
+ ## The parameters host, port, and optionally user, password, secure, compression are specified for each server:
+ # host – The address of the remote server.
+ # port – The TCP port for messenger activity ('tcp_port' in the config, usually set to 9000).
+ # user – Name of the user for connecting to a remote server. Access is configured in the users.xml file. For more information, see the section "Access rights".
+ # password – The password for connecting to a remote server (not masked).
+    # secure - Use SSL for the connection; usually you should also set port = 9440. The server should listen on 9440 and have correct certificates.
+ # compression - Use data compression. Default value: true.
+ remote_servers:
+ enabled: true
+ internal_replication: false
+ replica:
+ user: "default"
+ #password: ""
+ compression: true
+ backup:
+ enabled: true
+ ##
+ ## Sending data to Graphite.
+ # interval – The interval for sending, in seconds.
+ # timeout – The timeout for sending data, in seconds.
+ # root_path – Prefix for keys.
+    # metrics – Sending data from the system.metrics table.
+    # events – Sending data from the system.events table.
+    # asynchronous_metrics – Sending data from the system.asynchronous_metrics table.
+ ## You can configure multiple clauses. For instance, you can use this for sending different data at different intervals.
+ graphite:
+ enabled: false
+ config:
+ - timeout: "0.1"
+ interval: "60"
+ root_path: "one_min"
+ metrics: true
+ events: true
+ events_cumulative: true
+ asynchronous_metrics: true
+ ##
+ ## A settings profile is a collection of settings grouped under the same name.
+ ## Each ClickHouse user has a profile.
+ ## To apply all the settings in a profile, set the profile setting.
+ ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_profiles/
+ profiles:
+ enabled: false
+ profile:
+ - name: "default"
+ config:
+ max_memory_usage: "10000000000"
+ use_uncompressed_cache: "0"
+ load_balancing: "random"
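+    ##
+    ## A sketch of an additional read-only profile entry appended to the profile
+    ## list above; "readonly" is a standard ClickHouse setting, and the profile
+    ## name below is an assumption:
+    # - name: "readonly"
+    #   config:
+    #     readonly: "1"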
+ ##
+    ## The users section of the users.xml configuration file contains user settings.
+ ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_users/
+ users:
+ enabled: false
+ user:
+ - name: "default"
+ config:
+ #password: ""
+ networks:
+ - "::/0"
+ profile: "default"
+ quota: "default"
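+    ##
+    ## As a sketch, an additional restricted user entry appended to the user list
+    ## could look like this; the user name, password and network range below are
+    ## assumptions:
+    # - name: "reporting"
+    #   config:
+    #     password: "changeme"
+    #     networks:
+    #     - "10.0.0.0/8"
+    #     profile: "default"
+    #     quota: "default"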
+ ##
+ ## Quotas allow you to limit resource usage over a period of time, or simply track the use of resources.
+ ## Quotas are set up in the user config. This is usually 'users.xml'.
+ ## More info: https://clickhouse.yandex/docs/en/operations/quotas/
+ quotas:
+ enabled: false
+ quota:
+ - name: "default"
+ config:
+ - duration: "3600"
+ queries: "0"
+ errors: "0"
+ result_rows: "0"
+ read_rows: "0"
+ execution_time: "0"
+
+##
+## Web interface for ClickHouse in the Tabix project.
+## Features:
+# Works with ClickHouse directly from the browser, without the need to install additional software.
+# Query editor with syntax highlighting.
+# Auto-completion of commands.
+# Tools for graphical analysis of query execution.
+# Color scheme options.
+tabix:
+ ##
+ ## Enable Tabix
+ enabled: true
+ ##
+  ## The number of Tabix replicas
+ replicas: "1"
+ ##
+ ## The deployment strategy to use to replace existing pods with new ones.
+ updateStrategy:
+ ##
+ ## Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ type: "RollingUpdate"
+ ##
+ ## The maximum number of pods that can be scheduled above the desired number of pods.
+ maxSurge: 3
+ ##
+ ## The maximum number of pods that can be unavailable during the update.
+ maxUnavailable: 1
+ ##
+ ## Docker image name.
+ image: "spoonest/clickhouse-tabix-web-client"
+ ##
+ ## Docker image version
+ imageVersion: "stable"
+ ##
+ ## Image pull policy. One of Always, Never, IfNotPresent.
+ ## Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ imagePullPolicy: "IfNotPresent"
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ##
+ ## ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ ## If specified, these secrets will be passed to individual puller implementations for them to use.
+ ## For example, in the case of docker, only DockerConfig type secrets are honored.
+ ## More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ #imagePullSecrets:
+ ##
+  ## You can limit access to the Tabix UI at the proxy level.
+  ## Use the user and password parameters to restrict access to the specified user.
+ security:
+ user: "admin"
+ password: "admin"
+ ##
+  ## You can automatically connect to a ClickHouse server by specifying the chName, chHost, chLogin, chPassword and/or chParams environment variables.
+ #automaticConnection:
+ # chName: "test"
+ # chHost: "test"
+ # chLogin: "test"
+ # chPassword: "test"
+ # chParams: ""
+ ##
+ ## An API object that manages external access to the services in a cluster, typically HTTP.
+ ## Ingress can provide load balancing, SSL termination and name-based virtual hosting.
+ ingress:
+ enabled: false
+ # host: "tabix.domain.com"
+ # path: "/"
+ # tls:
+ # enabled: false
+ # hosts:
+ # - "tabix.domain.com"
+ # - "tabix.domain1.com"
+ # secretName: "tabix-secret"