diff --git a/charts/tidb-cluster/templates/_helpers.tpl b/charts/tidb-cluster/templates/_helpers.tpl index 837138eadc0..766d43789b7 100644 --- a/charts/tidb-cluster/templates/_helpers.tpl +++ b/charts/tidb-cluster/templates/_helpers.tpl @@ -28,7 +28,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- end -}} {{- define "cluster.scheme" -}} -{{ if .Values.enableTLSCluster }}https{{ else }}http{{ end }} +{{ if and .Values.tlsCluster .Values.tlsCluster.enabled }}https{{ else }}http{{ end }} {{- end -}} {{/* @@ -41,9 +41,9 @@ config-file: |- {{- if .Values.pd.config }} {{ .Values.pd.config | indent 2 }} {{- end -}} - {{- if .Values.enableTLSCluster }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} [security] - cacert-path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + cacert-path = "/var/lib/pd-tls/ca.crt" cert-path = "/var/lib/pd-tls/tls.crt" key-path = "/var/lib/pd-tls/tls.key" {{- end -}} @@ -64,9 +64,9 @@ config-file: |- {{- if .Values.tikv.config }} {{ .Values.tikv.config | indent 2 }} {{- end -}} - {{- if .Values.enableTLSCluster }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} [security] - ca-path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ca-path = "/var/lib/tikv-tls/ca.crt" cert-path = "/var/lib/tikv-tls/tls.crt" key-path = "/var/lib/tikv-tls/tls.key" {{- end -}} @@ -91,11 +91,11 @@ config-file: |- {{- if .Values.tidb.config }} {{ .Values.tidb.config | indent 2 }} {{- end -}} - {{- if or .Values.enableTLSCluster (and .Values.tidb.tlsClient .Values.tidb.tlsClient.enabled) }} + {{- if or (and .Values.tlsCluster .Values.tlsCluster.enabled) (and .Values.tidb.tlsClient .Values.tidb.tlsClient.enabled) }} [security] {{- end -}} - {{- if .Values.enableTLSCluster }} - cluster-ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + cluster-ssl-ca = "/var/lib/tidb-tls/ca.crt" cluster-ssl-cert = 
"/var/lib/tidb-tls/tls.crt" cluster-ssl-key = "/var/lib/tidb-tls/tls.key" {{- end -}} @@ -122,7 +122,7 @@ Encapsulate pump configmap data for consistent digest calculation pump-config: |- {{- if .Values.binlog.pump.config }} {{ .Values.binlog.pump.config | indent 2 }} - {{- if .Values.enableTLSCluster }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} [security] ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" ssl-cert = "/var/lib/pump-tls/tls.crt" diff --git a/charts/tidb-cluster/templates/config/_prometheus-config.tpl b/charts/tidb-cluster/templates/config/_prometheus-config.tpl index 34f3f0f4d04..12bdc79bee9 100644 --- a/charts/tidb-cluster/templates/config/_prometheus-config.tpl +++ b/charts/tidb-cluster/templates/config/_prometheus-config.tpl @@ -19,13 +19,17 @@ scrape_configs: names: - {{ .Release.Namespace }} {{- end }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + scheme: https + tls_config: + insecure_skip_verify: false + ca_file: /var/lib/cluster-client-tls/ca.crt + cert_file: /var/lib/cluster-client-tls/tls.crt + key_file: /var/lib/cluster-client-tls/tls.key + {{- else }} + scheme: http tls_config: insecure_skip_verify: true - {{- if .Values.enableTLSCluster }} - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - cert_file: /var/lib/pd-client-tls/tls.crt - key_file: /var/lib/pd-client-tls/tls.key - scheme: https {{- end }} relabel_configs: - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] @@ -41,11 +45,12 @@ scrape_configs: action: replace target_label: __metrics_path__ regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ + replacement: $1.$2-pd-peer:$3 
+ action: replace - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace @@ -71,13 +76,17 @@ scrape_configs: names: - {{ .Release.Namespace }} {{- end }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + scheme: https + tls_config: + insecure_skip_verify: false + ca_file: /var/lib/cluster-client-tls/ca.crt + cert_file: /var/lib/cluster-client-tls/tls.crt + key_file: /var/lib/cluster-client-tls/tls.key + {{- else }} + scheme: http tls_config: insecure_skip_verify: true - {{- if .Values.enableTLSCluster }} - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - cert_file: /var/lib/pd-client-tls/tls.crt - key_file: /var/lib/pd-client-tls/tls.key - scheme: https {{- end }} relabel_configs: - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] @@ -93,11 +102,12 @@ scrape_configs: action: replace target_label: __metrics_path__ regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ + replacement: $1.$2-tidb-peer:$3 + action: replace - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace @@ -123,16 +133,23 @@ scrape_configs: names: - {{ .Release.Namespace }} {{- end }} + scheme: http tls_config: insecure_skip_verify: true -# TiKV doesn't support scheme https for now. -# And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340 -# {{- if .Values.enableTLSCluster }} -# ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -# cert_file: /var/lib/pd-client-tls/tls.crt -# key_file: /var/lib/pd-client-tls/tls.key -# scheme: https -# {{- end }} + # TiKV doesn't support scheme https for now. 
+ # And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340 + # {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + # scheme: https + # tls_config: + # insecure_skip_verify: false + # ca_file: /var/lib/cluster-client-tls/ca.crt + # cert_file: /var/lib/cluster-client-tls/tls.crt + # key_file: /var/lib/cluster-client-tls/tls.key + # {{- else }} + # scheme: http + # tls_config: + # insecure_skip_verify: true + # {{- end }} relabel_configs: - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] action: keep @@ -147,11 +164,12 @@ scrape_configs: action: replace target_label: __metrics_path__ regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ + replacement: $1.$2-tikv-peer:$3 + action: replace - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace diff --git a/charts/tidb-cluster/templates/config/_pump-config.tpl b/charts/tidb-cluster/templates/config/_pump-config.tpl index 9c20bb80203..07795a899ef 100644 --- a/charts/tidb-cluster/templates/config/_pump-config.tpl +++ b/charts/tidb-cluster/templates/config/_pump-config.tpl @@ -35,7 +35,7 @@ sync-log = {{ .Values.binlog.pump.syncLog | default true }} # write-buffer = 67108864 # write-L0-pause-trigger = 24 # write-L0-slowdown-trigger = 17 -{{ if .Values.enableTLSCluster }} +{{ if and .Values.tlsCluster .Values.tlsCluster.enabled }} [security] # Path of file that contains list of trusted SSL CAs for connection with cluster components. 
ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" diff --git a/charts/tidb-cluster/templates/monitor-deployment.yaml b/charts/tidb-cluster/templates/monitor-deployment.yaml index 3b7c82c7f45..b297a1553c6 100644 --- a/charts/tidb-cluster/templates/monitor-deployment.yaml +++ b/charts/tidb-cluster/templates/monitor-deployment.yaml @@ -134,9 +134,9 @@ spec: - name: prometheus-rules mountPath: /prometheus-rules readOnly: false - {{- if .Values.enableTLSCluster }} - - name: tls-pd-client - mountPath: /var/lib/pd-client-tls + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + - name: cluster-client-tls + mountPath: /var/lib/cluster-client-tls readOnly: true {{- end }} {{- if .Values.monitor.grafana.create }} @@ -241,11 +241,11 @@ spec: name: prometheus-rules - emptyDir: {} name: grafana-dashboard - {{- if .Values.enableTLSCluster }} - - name: tls-pd-client + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} + - name: cluster-client-tls secret: defaultMode: 420 - secretName: {{ .Release.Name }}-pd-client + secretName: {{ .Release.Name }}-cluster-client-secret {{- end }} {{- if .Values.monitor.tolerations }} tolerations: diff --git a/charts/tidb-cluster/templates/pump-statefulset.yaml b/charts/tidb-cluster/templates/pump-statefulset.yaml index f1bd44510ea..f73fa3c06db 100644 --- a/charts/tidb-cluster/templates/pump-statefulset.yaml +++ b/charts/tidb-cluster/templates/pump-statefulset.yaml @@ -55,7 +55,7 @@ spec: mountPath: /data - name: config mountPath: /etc/pump - {{- if .Values.enableTLSCluster }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} - name: pump-tls mountPath: /var/lib/pump-tls readOnly: true @@ -78,7 +78,7 @@ spec: items: - key: pump-config path: pump.toml - {{- if .Values.enableTLSCluster }} + {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }} - name: pump-tls secret: secretName: {{ include "pump.tlsSecretName" . 
}} diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml index 3a94ea400b2..1567b6d9df9 100644 --- a/charts/tidb-cluster/templates/tidb-cluster.yaml +++ b/charts/tidb-cluster/templates/tidb-cluster.yaml @@ -21,7 +21,10 @@ spec: pvReclaimPolicy: {{ .Values.pvReclaimPolicy }} enablePVReclaim: {{ .Values.enablePVReclaim }} timezone: {{ .Values.timezone | default "UTC" }} - enableTLSCluster: {{ .Values.enableTLSCluster | default false }} +{{- if .Values.tlsCluster }} + tlsCluster: +{{ toYaml .Values.tlsCluster | indent 4 }} +{{- end }} services: {{ toYaml .Values.services | indent 4 }} schedulerName: {{ .Values.schedulerName | default "default-scheduler" }} diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml index 82f1aa81889..ebe10a85b40 100644 --- a/charts/tidb-cluster/values.yaml +++ b/charts/tidb-cluster/values.yaml @@ -55,10 +55,23 @@ discovery: # if the ConfigMap was not changed. enableConfigMapRollout: true -# Whether enable TLS connections between server nodes. -# When enabled, PD/TiDB/TiKV/PUMP will use TLS encrypted connections to transfer data between each node, -# certificates will be generated automatically (if not already present). -enableTLSCluster: false +# Whether enable the TLS connection between TiDB server components +tlsCluster: + # The steps to enable this feature: + # 1. Generate TiDB server components certificates and a client-side certificate for them. + # There are multiple ways to generate these certificates: + # - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/ + # - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ + # - or use cert-manager signed certificates: https://cert-manager.io/ + # 2. Create one secret object for one component which contains the certificates created above. 
+ # The name of this Secret must be: --cluster-secret. + # For PD: kubectl create secret generic -pd-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + # For TiKV: kubectl create secret generic -tikv-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + # For TiDB: kubectl create secret generic -tidb-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + # For Client: kubectl create secret generic -cluster-client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + # Same for other components. + # 3. Then create the TiDB cluster with `tlsCluster.enabled` set to `true`. + enabled: false pd: # Please refer to https://github.com/pingcap/pd/blob/master/conf/config.toml for the default @@ -618,7 +631,7 @@ binlog: # pump configurations (change to the tags of your pump version), # just follow the format in the file and configure in the 'config' section # as below if you want to customize any configuration. - # [security] section will be generated automatically if enableTLSCluster is set to true so users do not need to configure it. + # [security] section will be generated automatically if tlsCluster.enabled is set to true so users do not need to configure it. # config: | # gc = 7 # heartbeat-interval = 2 diff --git a/charts/tikv-importer/.helmignore b/charts/tikv-importer/.helmignore new file mode 100644 index 00000000000..f0c13194444 --- /dev/null +++ b/charts/tikv-importer/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/tikv-importer/Chart.yaml b/charts/tikv-importer/Chart.yaml new file mode 100644 index 00000000000..0c24c3ce3eb --- /dev/null +++ b/charts/tikv-importer/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +description: A Helm chart for TiKV Importer +name: tikv-importer +version: dev +home: https://github.com/pingcap/tidb-operator +sources: + - https://github.com/pingcap/tidb-operator +keywords: + - newsql + - htap + - database + - mysql + - raft diff --git a/charts/tikv-importer/templates/_helpers.tpl b/charts/tikv-importer/templates/_helpers.tpl new file mode 100644 index 00000000000..2372f181e88 --- /dev/null +++ b/charts/tikv-importer/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Encapsulate tikv-importer configmap data for consistent digest calculation +*/}} +{{- define "importer-configmap.data" -}} +config-file: |- + {{- if .Values.config }} +{{ .Values.config | indent 2 }} + {{- end -}} +{{- end -}} + +{{- define "importer-configmap.data-digest" -}} +{{ include "importer-configmap.data" . | sha256sum | trunc 8 }} +{{- end -}} diff --git a/charts/tikv-importer/templates/tikv-importer-configmap.yaml b/charts/tikv-importer/templates/tikv-importer-configmap.yaml new file mode 100644 index 00000000000..1bbcabafb01 --- /dev/null +++ b/charts/tikv-importer/templates/tikv-importer-configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.clusterName }}-importer-{{ template "importer-configmap.data-digest" . }} + labels: + app.kubernetes.io/name: {{ template "chart.name" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +data: +{{ include "importer-configmap.data" . | indent 2 }} diff --git a/charts/tikv-importer/templates/tikv-importer-service.yaml b/charts/tikv-importer/templates/tikv-importer-service.yaml new file mode 100644 index 00000000000..07ded6f97a4 --- /dev/null +++ b/charts/tikv-importer/templates/tikv-importer-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.clusterName }}-importer + labels: + app.kubernetes.io/name: {{ template "chart.name" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +spec: + clusterIP: None + ports: + - name: importer + port: 8287 + selector: + app.kubernetes.io/name: {{ template "chart.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer diff --git a/charts/tikv-importer/templates/tikv-importer-statefulset.yaml b/charts/tikv-importer/templates/tikv-importer-statefulset.yaml new file mode 100644 index 00000000000..18a6653ec36 --- /dev/null +++ b/charts/tikv-importer/templates/tikv-importer-statefulset.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Values.clusterName }}-importer + labels: + app.kubernetes.io/name: {{ template "chart.name" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "chart.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer + serviceName: {{ .Values.clusterName }}-importer + replicas: 1 + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "9091" + labels: + app.kubernetes.io/name: {{ template "chart.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: importer + spec: + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 6 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 6 }} + {{- end }} + containers: + - name: importer + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy | default "IfNotPresent"}} + command: + - /tikv-importer + # tikv-importer does not support domain name: https://github.com/tikv/importer/issues/16 + # - --addr=${MY_POD_NAME}.tikv-importer:8287 + - --addr=$(MY_POD_IP):8287 + - --config=/etc/tikv-importer/tikv-importer.toml + - --import-dir=/var/lib/tikv-importer + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: TZ + value: {{ .Values.timezone | default "UTC" }} + volumeMounts: + - name: data + mountPath: /var/lib/tikv-importer + - name: config + mountPath: /etc/tikv-importer + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + - name: pushgateway + image: {{ .Values.pushgatewayImage }} + imagePullPolicy: {{ .Values.pushgatewayImagePullPolicy | default "IfNotPresent" }} + volumes: + - name: config + configMap: + name: {{ .Values.clusterName }}-importer-{{ template "importer-configmap.data-digest" . 
}} items: - key: config-file path: tikv-importer.toml volumeClaimTemplates: - metadata: name: data spec: accessModes: [ "ReadWriteOnce" ] storageClassName: {{ .Values.storageClassName }} resources: requests: storage: {{ .Values.storage }} diff --git a/charts/tikv-importer/values.yaml b/charts/tikv-importer/values.yaml new file mode 100644 index 00000000000..b0ac35eb5ec --- /dev/null +++ b/charts/tikv-importer/values.yaml @@ -0,0 +1,32 @@ +# Default values for tikv-importer. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# timezone is the default system timezone for TiDB +timezone: UTC + +# clusterName is the TiDB cluster name, if not specified, the chart release name will be used +clusterName: demo + +image: pingcap/tidb-lightning:v3.0.8 +imagePullPolicy: IfNotPresent +storageClassName: local-storage +storage: 20Gi +resources: + {} + # limits: + # cpu: 16000m + # memory: 8Gi + # requests: + # cpu: 16000m + # memory: 8Gi +affinity: {} +tolerations: [] +pushgatewayImage: prom/pushgateway:v0.3.1 +pushgatewayImagePullPolicy: IfNotPresent +config: | + log-level = "info" + [metric] + job = "tikv-importer" + interval = "15s" + address = "localhost:9091" diff --git a/deploy/modules/aws/tidb-cluster/variables.tf b/deploy/modules/aws/tidb-cluster/variables.tf index b561b3f4137..37283721358 100644 --- a/deploy/modules/aws/tidb-cluster/variables.tf +++ b/deploy/modules/aws/tidb-cluster/variables.tf @@ -131,7 +131,7 @@ variable "worker_group_launch_template_tags" { } variable "worker_ami_name_filter" { - description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. \"v20190220\"" + description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get the latest AMI for the cluster_version, but it could be set to a specific version, e.g. 
\"v20190220\", please check the `Packer version` in https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html for the supported AMI versions." default = "v*" } diff --git a/manifests/crd.yaml b/manifests/crd.yaml index f727d95b67d..82f22358ebe 100644 --- a/manifests/crd.yaml +++ b/manifests/crd.yaml @@ -675,10 +675,6 @@ spec: description: 'Whether enable PVC reclaim for orphan PVC left by statefulset scale-in Optional: Defaults to false' type: boolean - enableTLSCluster: - description: 'Enable TLS connection between TiDB server components Optional: - Defaults to false' - type: boolean helper: description: HelperSpec contains details of helper component properties: @@ -6055,6 +6051,7 @@ spec: timezone: description: 'Time zone of TiDB cluster Pods Optional: Defaults to UTC' type: string + tlsCluster: {} tolerations: description: Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go index 2c2fa69f38e..3d8e1cadac8 100644 --- a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go +++ b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go @@ -40,9 +40,8 @@ func setTidbClusterSpecDefault(tc *v1alpha1.TidbCluster) { if string(tc.Spec.ImagePullPolicy) == "" { tc.Spec.ImagePullPolicy = corev1.PullIfNotPresent } - if tc.Spec.EnableTLSCluster == nil { - d := false - tc.Spec.EnableTLSCluster = &d + if tc.Spec.TLSCluster == nil { + tc.Spec.TLSCluster = &v1alpha1.TLSCluster{Enabled: false} } if tc.Spec.EnablePVReclaim == nil { d := false diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go index 5e77035bf68..28b1a09dc13 100644 --- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go +++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go @@ -6118,11 +6118,10 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba Format: "", }, 
}, - "enableTLSCluster": { + "tlsCluster": { SchemaProps: spec.SchemaProps{ - Description: "Enable TLS connection between TiDB server components Optional: Defaults to false", - Type: []string{"boolean"}, - Format: "", + Description: "Whether enable the TLS connection between TiDB server components Optional: Defaults to nil", + Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster"), }, }, "hostNetwork": { @@ -6200,7 +6199,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"}, + "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"}, } } diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go index ce3ce7f2705..a2f7449f210 100644 --- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go +++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go @@ -300,11 +300,7 @@ func (tc *TidbCluster) GetClusterID() string { } func (tc *TidbCluster) IsTLSClusterEnabled() bool { - enableTLCluster := tc.Spec.EnableTLSCluster - if enableTLCluster == nil { - return defaultEnableTLSCluster - } - return *enableTLCluster + return 
tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled } func (tc *TidbCluster) Scheme() string { diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go index f1244dfdc5c..1d6c22ada6e 100644 --- a/pkg/apis/pingcap/v1alpha1/types.go +++ b/pkg/apis/pingcap/v1alpha1/types.go @@ -150,10 +150,10 @@ type TidbClusterSpec struct { // +optional EnablePVReclaim *bool `json:"enablePVReclaim,omitempty"` - // Enable TLS connection between TiDB server components - // Optional: Defaults to false + // Whether enable the TLS connection between TiDB server components + // Optional: Defaults to nil // +optional - EnableTLSCluster *bool `json:"enableTLSCluster,omitempty"` + TLSCluster *TLSCluster `json:"tlsCluster,omitempty"` // Whether Hostnetwork is enabled for TiDB cluster Pods // Optional: Defaults to false @@ -618,6 +618,29 @@ type TiDBTLSClient struct { Enabled bool `json:"enabled,omitempty"` } +// TLSCluster can enable TLS connection between TiDB server components +// https://pingcap.com/docs/stable/how-to/secure/enable-tls-between-components/ +type TLSCluster struct { + // Enable mutual TLS authentication among TiDB components + // Once enabled, the mutual authentication applies to all components, + // and it does not support applying to only part of the components. + // The steps to enable this feature: + // 1. Generate TiDB server components certificates and a client-side certificate for them. + // There are multiple ways to generate these certificates: + // - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/ + // - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ + // - or use cert-manager signed certificates: https://cert-manager.io/ + // 2. Create one secret object for one component which contains the certificates created above. + // The name of this Secret must be: --cluster-secret. 
+ // For PD: kubectl create secret generic -pd-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // For TiKV: kubectl create secret generic -tikv-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // For TiDB: kubectl create secret generic -tidb-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // For Client: kubectl create secret generic -cluster-client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt= + // Same for other components. + // +optional + Enabled bool `json:"enabled,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go index f660699e831..e8ed1ac39cb 100644 --- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go @@ -2215,6 +2215,22 @@ func (in StringSlice) DeepCopy() StringSlice { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCluster) DeepCopyInto(out *TLSCluster) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCluster. +func (in *TLSCluster) DeepCopy() *TLSCluster { + if in == nil { + return nil + } + out := new(TLSCluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TiDBAccessConfig) DeepCopyInto(out *TiDBAccessConfig) { *out = *in @@ -3999,9 +4015,9 @@ func (in *TidbClusterSpec) DeepCopyInto(out *TidbClusterSpec) { *out = new(bool) **out = **in } - if in.EnableTLSCluster != nil { - in, out := &in.EnableTLSCluster, &out.EnableTLSCluster - *out = new(bool) + if in.TLSCluster != nil { + in, out := &in.TLSCluster, &out.TLSCluster + *out = new(TLSCluster) **out = **in } if in.HostNetwork != nil { diff --git a/pkg/controller/tidb_control.go b/pkg/controller/tidb_control.go index 75c6ac975cc..bf6282cb3a8 100644 --- a/pkg/controller/tidb_control.go +++ b/pkg/controller/tidb_control.go @@ -15,16 +15,19 @@ package controller import ( "crypto/tls" + "crypto/x509" "encoding/json" "fmt" - "io/ioutil" - "net/http" - "time" - "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/httputil" - certutil "github.com/pingcap/tidb-operator/pkg/util/crypto" + "github.com/pingcap/tidb-operator/pkg/util" "github.com/pingcap/tidb/config" + "io/ioutil" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "net/http" + "time" ) const ( @@ -51,24 +54,33 @@ type TiDBControlInterface interface { // defaultTiDBControl is default implementation of TiDBControlInterface. 
type defaultTiDBControl struct { httpClient *http.Client + kubeCli kubernetes.Interface } // NewDefaultTiDBControl returns a defaultTiDBControl instance -func NewDefaultTiDBControl() TiDBControlInterface { - return &defaultTiDBControl{httpClient: &http.Client{Timeout: timeout}} +func NewDefaultTiDBControl(kubeCli kubernetes.Interface) TiDBControlInterface { + return &defaultTiDBControl{httpClient: &http.Client{Timeout: timeout}, kubeCli: kubeCli} } -func (tdc *defaultTiDBControl) useTLSHTTPClient(enableTLS bool) error { - if enableTLS { - rootCAs, err := certutil.ReadCACerts() - if err != nil { - return err - } - config := &tls.Config{ - RootCAs: rootCAs, - } - tdc.httpClient.Transport = &http.Transport{TLSClientConfig: config} +func (tdc *defaultTiDBControl) useTLSHTTPClient(tc *v1alpha1.TidbCluster) error { + if !tc.IsTLSClusterEnabled() { + return nil + } + + tcName := tc.Name + ns := tc.Namespace + secretName := util.ClusterClientTLSSecretName(tcName) + secret, err := tdc.kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{}) + if err != nil { + return err + } + + rootCAs := x509.NewCertPool() + rootCAs.AppendCertsFromPEM(secret.Data[v1.ServiceAccountRootCAKey]) + config := &tls.Config{ + RootCAs: rootCAs, } + tdc.httpClient.Transport = &http.Transport{TLSClientConfig: config} return nil } @@ -77,7 +89,7 @@ func (tdc *defaultTiDBControl) GetHealth(tc *v1alpha1.TidbCluster, ordinal int32 ns := tc.GetNamespace() scheme := tc.Scheme() - if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil { + if err := tdc.useTLSHTTPClient(tc); err != nil { return false, err } @@ -91,7 +103,7 @@ func (tdc *defaultTiDBControl) GetInfo(tc *v1alpha1.TidbCluster, ordinal int32) tcName := tc.GetName() ns := tc.GetNamespace() scheme := tc.Scheme() - if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil { + if err := tdc.useTLSHTTPClient(tc); err != nil { return nil, err } @@ -126,7 +138,7 @@ func (tdc *defaultTiDBControl) GetSettings(tc 
*v1alpha1.TidbCluster, ordinal int tcName := tc.GetName() ns := tc.GetNamespace() scheme := tc.Scheme() - if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil { + if err := tdc.useTLSHTTPClient(tc); err != nil { return nil, err } diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index 3de44bd02b6..0c0963c779a 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -96,7 +96,7 @@ func NewController( tcControl := controller.NewRealTidbClusterControl(cli, tcInformer.Lister(), recorder) pdControl := pdapi.NewDefaultPDControl(kubeCli) - tidbControl := controller.NewDefaultTiDBControl() + tidbControl := controller.NewDefaultTiDBControl(kubeCli) cmControl := controller.NewRealConfigMapControl(kubeCli, cmInformer.Lister(), recorder) setControl := controller.NewRealStatefuSetControl(kubeCli, setInformer.Lister(), recorder) svcControl := controller.NewRealServiceControl(kubeCli, svcInformer.Lister(), recorder) diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go index f6ad7085886..0e430bffa48 100644 --- a/pkg/manager/member/pd_member_manager.go +++ b/pkg/manager/member/pd_member_manager.go @@ -206,16 +206,6 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu if err != nil { return err } - if tc.IsTLSClusterEnabled() { - err := pmm.syncPDServerCerts(tc) - if err != nil { - return err - } - err = pmm.syncPDClientCerts(tc) - if err != nil { - return err - } - } if err := pmm.setControl.CreateStatefulSet(tc, newPDSet); err != nil { return err } @@ -260,62 +250,6 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu return updateStatefulSet(pmm.setControl, tc, newPDSet, oldPDSet) } -func (pmm *pdMemberManager) syncPDClientCerts(tc *v1alpha1.TidbCluster) error { - ns := tc.GetNamespace() - tcName := tc.GetName() - 
commonName := fmt.Sprintf("%s-pd-client", tcName) - - hostList := []string{ - commonName, - } - - certOpts := &controller.TiDBClusterCertOptions{ - Namespace: ns, - Instance: tcName, - CommonName: commonName, - HostList: hostList, - Component: "pd", - Suffix: "pd-client", - } - - return pmm.certControl.Create(controller.GetOwnerRef(tc), certOpts) -} - -func (pmm *pdMemberManager) syncPDServerCerts(tc *v1alpha1.TidbCluster) error { - ns := tc.GetNamespace() - tcName := tc.GetName() - svcName := controller.PDMemberName(tcName) - peerName := controller.PDPeerMemberName(tcName) - - if pmm.certControl.CheckSecret(ns, svcName) { - return nil - } - - hostList := []string{ - svcName, - peerName, - fmt.Sprintf("%s.%s", svcName, ns), - fmt.Sprintf("%s.%s", peerName, ns), - fmt.Sprintf("*.%s.%s.svc", peerName, ns), - } - - ipList := []string{ - "127.0.0.1", "::1", // able to access https endpoint via loopback network - } - - certOpts := &controller.TiDBClusterCertOptions{ - Namespace: ns, - Instance: tcName, - CommonName: svcName, - HostList: hostList, - IPList: ipList, - Component: "pd", - Suffix: "pd", - } - - return pmm.certControl.Create(controller.GetOwnerRef(tc), certOpts) -} - func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error { ns := tc.GetNamespace() tcName := tc.GetName() @@ -581,7 +515,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) ( vols = append(vols, corev1.Volume{ Name: "pd-tls", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: controller.PDMemberName(tcName), + SecretName: util.ClusterTLSSecretName(tc.Name, label.PDLabelVal), }, }, }) @@ -724,7 +658,7 @@ func getPDConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) { if config.Security == nil { config.Security = &v1alpha1.PDSecurityConfig{} } - config.Security.CAPath = serviceAccountCAPath + config.Security.CAPath = path.Join(pdClusterCertPath, tlsSecretRootCAKey) 
config.Security.CertPath = path.Join(pdClusterCertPath, corev1.TLSCertKey) config.Security.KeyPath = path.Join(pdClusterCertPath, corev1.TLSPrivateKeyKey) } diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index e31c0c8ca81..74cca48f573 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -180,12 +180,6 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid if err != nil { return err } - if tc.IsTLSClusterEnabled() { - err := tmm.syncTiDBClusterCerts(tc) - if err != nil { - return err - } - } err = tmm.setControl.CreateStatefulSet(tc, newTiDBSet) if err != nil { return err @@ -220,46 +214,6 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid return updateStatefulSet(tmm.setControl, tc, newTiDBSet, oldTiDBSet) } -// syncTiDBClusterCerts creates the cert pair for TiDB if not exist, the cert -// pair is used to communicate with other TiDB components, like TiKVs and PDs -func (tmm *tidbMemberManager) syncTiDBClusterCerts(tc *v1alpha1.TidbCluster) error { - ns := tc.GetNamespace() - tcName := tc.GetName() - svcName := controller.TiDBMemberName(tcName) - peerName := controller.TiDBPeerMemberName(tcName) - - if tmm.certControl.CheckSecret(ns, svcName) { - return nil - } - - hostList := []string{ - svcName, - peerName, - fmt.Sprintf("%s.%s", svcName, ns), - fmt.Sprintf("%s.%s.svc", svcName, ns), - fmt.Sprintf("%s.%s", peerName, ns), - fmt.Sprintf("%s.%s.svc", peerName, ns), - fmt.Sprintf("*.%s.%s", peerName, ns), - fmt.Sprintf("*.%s.%s.svc", peerName, ns), - } - - ipList := []string{ - "127.0.0.1", "::1", // able to access https endpoint via loopback network - } - - certOpts := &controller.TiDBClusterCertOptions{ - Namespace: ns, - Instance: tcName, - CommonName: svcName, - HostList: hostList, - IPList: ipList, - Component: "tidb", - Suffix: "tidb", - } - - return 
tmm.certControl.Create(controller.GetOwnerRef(tc), certOpts) -} - func (tmm *tidbMemberManager) syncTiDBService(tc *v1alpha1.TidbCluster) error { newSvc := getNewTiDBServiceOrNil(tc) @@ -349,7 +303,7 @@ func getTiDBConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) { if config.Security == nil { config.Security = &v1alpha1.Security{} } - config.Security.ClusterSSLCA = pointer.StringPtr(serviceAccountCAPath) + config.Security.ClusterSSLCA = pointer.StringPtr(path.Join(clusterCertPath, tlsSecretRootCAKey)) config.Security.ClusterSSLCert = pointer.StringPtr(path.Join(clusterCertPath, corev1.TLSCertKey)) config.Security.ClusterSSLKey = pointer.StringPtr(path.Join(clusterCertPath, corev1.TLSPrivateKeyKey)) } @@ -544,7 +498,7 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) vols = append(vols, corev1.Volume{ Name: "tidb-tls", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: controller.TiDBMemberName(tcName), + SecretName: util.ClusterTLSSecretName(tcName, label.TiDBLabelVal), }, }, }) diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index 550dadea1b5..88e46ae063b 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -1628,7 +1628,7 @@ func TestGetTiDBConfigMap(t *testing.T) { Namespace: "ns", }, Spec: v1alpha1.TidbClusterSpec{ - EnableTLSCluster: pointer.BoolPtr(true), + TLSCluster: &v1alpha1.TLSCluster{Enabled: true}, TiDB: v1alpha1.TiDBSpec{ ComponentSpec: v1alpha1.ComponentSpec{ ConfigUpdateStrategy: &updateStrategy, @@ -1669,7 +1669,7 @@ func TestGetTiDBConfigMap(t *testing.T) { ssl-ca = "/var/lib/tidb-server-tls/ca.crt" ssl-cert = "/var/lib/tidb-server-tls/tls.crt" ssl-key = "/var/lib/tidb-server-tls/tls.key" - cluster-ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + cluster-ssl-ca = "/var/lib/tidb-tls/ca.crt" cluster-ssl-cert = 
"/var/lib/tidb-tls/tls.crt" cluster-ssl-key = "/var/lib/tidb-tls/tls.key" `, diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 0baac4124fc..29d3c712241 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -15,6 +15,7 @@ package member import ( "fmt" + "github.com/pingcap/tidb-operator/pkg/util" "path" "reflect" "regexp" @@ -193,12 +194,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl if err != nil { return err } - if tc.IsTLSClusterEnabled() { - err := tkmm.syncTiKVServerCerts(tc) - if err != nil { - return err - } - } err = tkmm.setControl.CreateStatefulSet(tc, newSet) if err != nil { return err @@ -236,34 +231,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl return updateStatefulSet(tkmm.setControl, tc, newSet, oldSet) } -func (tkmm *tikvMemberManager) syncTiKVServerCerts(tc *v1alpha1.TidbCluster) error { - ns := tc.GetNamespace() - tcName := tc.GetName() - svcName := controller.TiKVMemberName(tcName) - peerName := controller.TiKVPeerMemberName(tcName) - - if tkmm.certControl.CheckSecret(ns, svcName) { - return nil - } - - hostList := []string{ - peerName, - fmt.Sprintf("%s.%s", peerName, ns), - fmt.Sprintf("*.%s.%s.svc", peerName, ns), - } - - certOpts := &controller.TiDBClusterCertOptions{ - Namespace: ns, - Instance: tcName, - CommonName: svcName, - HostList: hostList, - Component: "tikv", - Suffix: "tikv", - } - - return tkmm.certControl.Create(controller.GetOwnerRef(tc), certOpts) -} - func (tkmm *tikvMemberManager) syncTiKVConfigMap(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) (*corev1.ConfigMap, error) { // For backward compatibility, only sync tidb configmap when .tikv.config is non-nil if tc.Spec.TiKV.Config == nil { @@ -366,7 +333,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) vols = append(vols, corev1.Volume{ Name: "tikv-tls", 
VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: controller.TiKVMemberName(tcName), + SecretName: util.ClusterTLSSecretName(tc.Name, label.TiKVLabelVal), }, }, }) @@ -538,7 +505,7 @@ func getTikVConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) { if config.Security == nil { config.Security = &v1alpha1.TiKVSecurityConfig{} } - config.Security.CAPath = serviceAccountCAPath + config.Security.CAPath = path.Join(tikvClusterCertPath, tlsSecretRootCAKey) config.Security.CertPath = path.Join(tikvClusterCertPath, corev1.TLSCertKey) config.Security.KeyPath = path.Join(tikvClusterCertPath, corev1.TLSPrivateKeyKey) } diff --git a/pkg/manager/member/utils.go b/pkg/manager/member/utils.go index 7c451d26afc..04b66bb88dc 100644 --- a/pkg/manager/member/utils.go +++ b/pkg/manager/member/utils.go @@ -306,3 +306,7 @@ func updateStatefulSet(setCtl controller.StatefulSetControlInterface, tc *v1alph return nil } + +func clusterSecretName(tc *v1alpha1.TidbCluster, component string) string { + return fmt.Sprintf("%s-%s-cluster-secret", tc.Name, component) +} diff --git a/pkg/monitor/monitor/template.go b/pkg/monitor/monitor/template.go index 49bea641127..f7d4ee6efd6 100644 --- a/pkg/monitor/monitor/template.go +++ b/pkg/monitor/monitor/template.go @@ -14,12 +14,16 @@ package monitor import ( - "time" - + "fmt" + "github.com/pingcap/tidb-operator/pkg/label" + "github.com/pingcap/tidb-operator/pkg/util" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" "k8s.io/klog" + "path" + "time" ) const ( @@ -32,9 +36,6 @@ const ( podNameLabel = "__meta_kubernetes_pod_name" nodeNameLabel = "__meta_kubernetes_pod_node_name" podIPLabel = "__meta_kubernetes_pod_ip" - caFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - certFilePath = "/var/lib/pd-client-tls/tls.crt" - keyFilePath = "/var/lib/pd-client-tls/tls.key" ) var ( @@ -44,6 +45,7 @@ var ( tikvPattern 
config.Regexp pdPattern config.Regexp tidbPattern config.Regexp + addressPattern config.Regexp dashBoardConfig = `{ "apiVersion": 1, "providers": [ @@ -86,6 +88,10 @@ func init() { if err != nil { klog.Fatalf("monitor regex template parse error,%v", err) } + addressPattern, err = config.NewRegexp("(.+);(.+);(.+)") + if err != nil { + klog.Fatalf("monitor regex template parse error,%v", err) + } } type MonitorConfigModel struct { @@ -119,10 +125,35 @@ func newPrometheusConfig(cmodel *MonitorConfigModel) *config.Config { } func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfigModel) *config.ScrapeConfig { + + addressRelabelConfig := &config.RelabelConfig{ + SourceLabels: model.LabelNames{ + "__address__", + ioPortLabel, + }, + Action: config.RelabelReplace, + Regex: portPattern, + Replacement: "$1:$2", + TargetLabel: "__address__", + } + if name == label.PDLabelVal || name == label.TiDBLabelVal || name == label.TiKVLabelVal { + addressRelabelConfig = &config.RelabelConfig{ + SourceLabels: model.LabelNames{ + podNameLabel, + instanceLabel, + ioPortLabel, + }, + Action: config.RelabelReplace, + Regex: addressPattern, + Replacement: fmt.Sprintf("$1.$2-%s-peer:$3", name), + TargetLabel: "__address__", + } + } return &config.ScrapeConfig{ JobName: name, ScrapeInterval: model.Duration(15 * time.Second), + Scheme: "http", HonorLabels: true, ServiceDiscoveryConfig: config.ServiceDiscoveryConfig{ KubernetesSDConfigs: []*config.KubernetesSDConfig{ @@ -138,9 +169,6 @@ func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfi TLSConfig: config.TLSConfig{ InsecureSkipVerify: true, }, - XXX: map[string]interface{}{ - "scheme": "http", - }, }, RelabelConfigs: []*config.RelabelConfig{ { @@ -172,16 +200,7 @@ func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfi TargetLabel: "__metrics_path__", Regex: allMatchPattern, }, - { - SourceLabels: model.LabelNames{ - "__address__", - ioPortLabel, - }, - Action: 
config.RelabelReplace, - Regex: portPattern, - Replacement: "$1:$2", - TargetLabel: "__address__", - }, + addressRelabelConfig, { SourceLabels: model.LabelNames{ namespaceLabel, @@ -249,12 +268,12 @@ func addTlsConfig(pc *config.Config) { // And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340 if sconfig.JobName == "pd" || sconfig.JobName == "tidb" { sconfig.HTTPClientConfig.TLSConfig = config.TLSConfig{ - CAFile: caFilePath, - CertFile: certFilePath, - KeyFile: keyFilePath, + CAFile: path.Join(util.ClusterClientTLSPath, corev1.ServiceAccountRootCAKey), + CertFile: path.Join(util.ClusterClientTLSPath, corev1.TLSCertKey), + KeyFile: path.Join(util.ClusterClientTLSPath, corev1.TLSPrivateKeyKey), } pc.ScrapeConfigs[id] = sconfig - sconfig.HTTPClientConfig.XXX["scheme"] = "https" + sconfig.Scheme = "https" } } } diff --git a/pkg/monitor/monitor/template_test.go b/pkg/monitor/monitor/template_test.go index d6b08bcbe37..4c4825487ca 100644 --- a/pkg/monitor/monitor/template_test.go +++ b/pkg/monitor/monitor/template_test.go @@ -31,6 +31,7 @@ scrape_configs: - job_name: pd honor_labels: true scrape_interval: 15s + scheme: http kubernetes_sd_configs: - api_server: null role: pod @@ -54,10 +55,11 @@ scrape_configs: regex: (.+) target_label: __metrics_path__ action: replace - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - regex: ([^:]+)(?::\d+)?;(\d+) + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ - replacement: $1:$2 + replacement: $1.$2-pd-peer:$3 action: replace - source_labels: [__meta_kubernetes_namespace] target_label: kubernetes_namespace @@ -77,6 +79,7 @@ scrape_configs: - job_name: tidb honor_labels: true scrape_interval: 15s + scheme: http kubernetes_sd_configs: - api_server: null role: pod @@ -100,10 +103,11 @@ scrape_configs: 
regex: (.+) target_label: __metrics_path__ action: replace - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - regex: ([^:]+)(?::\d+)?;(\d+) + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ - replacement: $1:$2 + replacement: $1.$2-tidb-peer:$3 action: replace - source_labels: [__meta_kubernetes_namespace] target_label: kubernetes_namespace @@ -123,6 +127,7 @@ scrape_configs: - job_name: tikv honor_labels: true scrape_interval: 15s + scheme: http kubernetes_sd_configs: - api_server: null role: pod @@ -146,10 +151,11 @@ scrape_configs: regex: (.+) target_label: __metrics_path__ action: replace - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - regex: ([^:]+)(?::\d+)?;(\d+) + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) target_label: __address__ - replacement: $1:$2 + replacement: $1.$2-tikv-peer:$3 action: replace - source_labels: [__meta_kubernetes_namespace] target_label: kubernetes_namespace @@ -179,3 +185,176 @@ scrape_configs: g.Expect(err).NotTo(HaveOccurred()) g.Expect(content).Should(Equal(expectedContent)) } + +func TestRenderPrometheusConfigTLSEnabled(t *testing.T) { + g := NewGomegaWithT(t) + target, _ := config.NewRegexp("target") + expectedContent := `global: + scrape_interval: 15s + evaluation_interval: 15s +rule_files: +- /prometheus-rules/rules/*.rules.yml +scrape_configs: +- job_name: pd + honor_labels: true + scrape_interval: 15s + scheme: https + kubernetes_sd_configs: + - api_server: null + role: pod + namespaces: + names: + - ns1 + - ns2 + tls_config: + ca_file: /var/lib/cluster-client-tls/ca.crt + cert_file: /var/lib/cluster-client-tls/tls.crt + key_file: 
/var/lib/cluster-client-tls/tls.key + insecure_skip_verify: false + relabel_configs: + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + regex: target + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component] + regex: pd + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + regex: "true" + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + regex: (.+) + target_label: __metrics_path__ + action: replace + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) + target_label: __address__ + replacement: $1.$2-pd-peer:$3 + action: replace + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace +- job_name: tidb + honor_labels: true + scrape_interval: 15s + scheme: https + kubernetes_sd_configs: + - api_server: null + role: pod + namespaces: + names: + - ns1 + - ns2 + tls_config: + ca_file: /var/lib/cluster-client-tls/ca.crt + cert_file: /var/lib/cluster-client-tls/tls.crt + key_file: /var/lib/cluster-client-tls/tls.key + insecure_skip_verify: false + relabel_configs: + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + regex: target + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component] + regex: tidb + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + 
regex: "true" + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + regex: (.+) + target_label: __metrics_path__ + action: replace + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) + target_label: __address__ + replacement: $1.$2-tidb-peer:$3 + action: replace + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace +- job_name: tikv + honor_labels: true + scrape_interval: 15s + scheme: http + kubernetes_sd_configs: + - api_server: null + role: pod + namespaces: + names: + - ns1 + - ns2 + tls_config: + insecure_skip_verify: true + relabel_configs: + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + regex: target + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component] + regex: tikv + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + regex: "true" + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + regex: (.+) + target_label: __metrics_path__ + action: replace + - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance, + __meta_kubernetes_pod_annotation_prometheus_io_port] + regex: (.+);(.+);(.+) + target_label: __address__ + replacement: $1.$2-tikv-peer:$3 + action: replace + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + 
action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace + - source_labels: [__meta_kubernetes_pod_name] + target_label: instance + action: replace + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + target_label: cluster + action: replace +` + model := &MonitorConfigModel{ + ReleaseTargetRegex: &target, + ReleaseNamespaces: []string{ + "ns1", + "ns2", + }, + EnableTLSCluster: true, + } + content, err := RenderPrometheusConfig(model) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(content).Should(Equal(expectedContent)) +} diff --git a/pkg/monitor/monitor/util.go b/pkg/monitor/monitor/util.go index e319f41df23..7aa15cd3b3a 100644 --- a/pkg/monitor/monitor/util.go +++ b/pkg/monitor/monitor/util.go @@ -16,6 +16,7 @@ package monitor import ( "encoding/json" "fmt" + "github.com/pingcap/tidb-operator/pkg/util" "strconv" "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" @@ -426,8 +427,8 @@ func getMonitorPrometheusContainer(monitor *v1alpha1.TidbMonitor, tc *v1alpha1.T if tc.IsTLSClusterEnabled() { c.VolumeMounts = append(c.VolumeMounts, core.VolumeMount{ - Name: "tls-pd-client", - MountPath: "/var/lib/pd-client-tls", + Name: "cluster-client-tls", + MountPath: util.ClusterClientTLSPath, ReadOnly: true, }) } @@ -635,10 +636,10 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc if tc.IsTLSClusterEnabled() { defaultMode := int32(420) tlsPDClient := core.Volume{ - Name: "tls-pd-client", + Name: "cluster-client-tls", VolumeSource: core.VolumeSource{ Secret: &core.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-pd-client", tc.Name), + SecretName: util.ClusterClientTLSSecretName(tc.Name), DefaultMode: &defaultMode, }, }, diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go index 2702cc65ab0..32b9371c270 100644 --- a/pkg/pdapi/pdapi.go +++ 
b/pkg/pdapi/pdapi.go @@ -19,6 +19,7 @@ import ( "crypto/x509" "encoding/json" "fmt" + "github.com/pingcap/tidb-operator/pkg/util" "io/ioutil" "net/http" "strings" @@ -33,7 +34,6 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/pd/pkg/typeutil" "github.com/pingcap/tidb-operator/pkg/httputil" - certutil "github.com/pingcap/tidb-operator/pkg/util/crypto" types "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -66,23 +66,19 @@ func NewDefaultPDControl(kubeCli kubernetes.Interface) PDControlInterface { // GetTLSConfig returns *tls.Config for given TiDB cluster. // It loads in-cluster root ca if caCert is empty. func GetTLSConfig(kubeCli kubernetes.Interface, namespace Namespace, tcName string, caCert []byte) (*tls.Config, error) { - secretName := fmt.Sprintf("%s-pd-client", tcName) + secretName := util.ClusterClientTLSSecretName(tcName) secret, err := kubeCli.CoreV1().Secrets(string(namespace)).Get(secretName, types.GetOptions{}) if err != nil { return nil, fmt.Errorf("unable to load certificates from secret %s/%s: %v", namespace, secretName, err) } - var rootCAs *x509.CertPool + rootCAs := x509.NewCertPool() var tlsCert tls.Certificate if len(caCert) > 0 { - rootCAs = x509.NewCertPool() rootCAs.AppendCertsFromPEM(caCert) } else { - rootCAs, err = certutil.ReadCACerts() - if err != nil { - return nil, err - } + rootCAs.AppendCertsFromPEM(secret.Data[v1.ServiceAccountRootCAKey]) } clientCert, certExists := secret.Data[v1.TLSCertKey] diff --git a/pkg/util/crypto/certs.go b/pkg/util/crypto/certs.go index 318d9b3187c..c149ef5865a 100644 --- a/pkg/util/crypto/certs.go +++ b/pkg/util/crypto/certs.go @@ -16,11 +16,9 @@ package crypto import ( "crypto/rand" "crypto/rsa" - "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "fmt" "io/ioutil" "net" @@ -105,18 +103,3 @@ func ReadCACerts() (*x509.CertPool, error) { } return rootCAs, nil } - -func LoadCerts(cert []byte, key []byte) (*x509.CertPool, tls.Certificate, error) { - 
if cert == nil || key == nil { - return nil, tls.Certificate{}, fmt.Errorf("fail to load certs, cert and key can not be empty") - } - - rootCAs, err := ReadCACerts() - if err != nil { - return rootCAs, tls.Certificate{}, err - } - - // load client cert - tlsCert, err := tls.X509KeyPair(cert, key) - return rootCAs, tlsCert, err -} diff --git a/pkg/util/util.go b/pkg/util/util.go index 6dd83b57cbb..f1b571f4aca 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -28,6 +28,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) +var ( + ClusterClientTLSPath = "/var/lib/cluster-client-tls" +) + func GetOrdinalFromPodName(podName string) (int32, error) { ordinalStr := podName[strings.LastIndex(podName, "-")+1:] ordinalInt, err := strconv.Atoi(ordinalStr) @@ -163,3 +167,11 @@ func Encode(obj interface{}) (string, error) { } return string(b), nil } + +func ClusterClientTLSSecretName(tcName string) string { + return fmt.Sprintf("%s-cluster-client-secret", tcName) +} + +func ClusterTLSSecretName(tcName, component string) string { + return fmt.Sprintf("%s-%s-cluster-secret", tcName, component) +} diff --git a/tests/actions.go b/tests/actions.go index d25d693402d..947c0820aed 100644 --- a/tests/actions.go +++ b/tests/actions.go @@ -124,7 +124,7 @@ func NewOperatorActions(cli versioned.Interface, framework.ExpectNoError(err) oa.tidbControl = proxiedtidbclient.NewProxiedTiDBClient(fw, kubeCfg.TLSClientConfig.CAData) } else { - oa.tidbControl = controller.NewDefaultTiDBControl() + oa.tidbControl = controller.NewDefaultTiDBControl(kubeCli) } oa.clusterEvents = make(map[string]*clusterEvent) for _, c := range clusters { diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go index b450875b738..b5d2ec7b47b 100644 --- a/tests/e2e/tidbcluster/tidbcluster.go +++ b/tests/e2e/tidbcluster/tidbcluster.go @@ -142,13 +142,6 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() { "tikv.resources.limits.storage": "1G", }, }, - { - Version: 
utilimage.TiDBTLSVersion, - Name: "basic-v3-cluster-tls", - Values: map[string]string{ - "enableTLSCluster": "true", - }, - }, } for _, clusterCfg := range clusterCfgs {