diff --git a/charts/add-ons/prometheus/.helmignore b/charts/add-ons/prometheus/.helmignore new file mode 100644 index 0000000000000..50af031725419 --- /dev/null +++ b/charts/add-ons/prometheus/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/add-ons/prometheus/Chart.yaml b/charts/add-ons/prometheus/Chart.yaml new file mode 100644 index 0000000000000..220f27dacda88 --- /dev/null +++ b/charts/add-ons/prometheus/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for the prometheus add-on in Linkerd +name: prometheus +version: 0.1.0 +maintainers: + - name: Linkerd authors + email: cncf-linkerd-dev@lists.cncf.io + url: https://linkerd.io/ diff --git a/charts/add-ons/prometheus/requirements.yaml b/charts/add-ons/prometheus/requirements.yaml new file mode 100644 index 0000000000000..b5d3df3884f5b --- /dev/null +++ b/charts/add-ons/prometheus/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: partials + version: 0.1.0 + repository: file://../../partials diff --git a/charts/linkerd2/templates/prometheus-rbac.yaml b/charts/add-ons/prometheus/templates/prometheus-rbac.yaml similarity index 100% rename from charts/linkerd2/templates/prometheus-rbac.yaml rename to charts/add-ons/prometheus/templates/prometheus-rbac.yaml diff --git a/charts/linkerd2/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml similarity index 87% rename from charts/linkerd2/templates/prometheus.yaml rename to charts/add-ons/prometheus/templates/prometheus.yaml index bb997ab938ae7..820444ae59209 100644 --- a/charts/linkerd2/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -15,15 +15,10 @@ metadata: {{.Values.global.createdByAnnotation}}: {{default (printf "linkerd/helm %s" .Values.global.linkerdVersion) .Values.global.cliVersion}} data: prometheus.yml: |- - {{- if .Values.prometheusAlertmanagers }} - alerting: - alertmanagers: - {{- toYaml .Values.prometheusAlertmanagers | trim | nindent 8 }} - {{- end }} global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + {{- if .Values.globalConfig -}} + {{- toYaml .Values.globalConfig | trim | nindent 6 }} + {{- end}} rule_files: - /etc/prometheus/*_rules.yml @@ -34,7 +29,6 @@ data: static_configs: - targets: ['localhost:9090'] - {{ if .Values.grafana.enabled -}} - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -45,7 +39,6 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - {{- end}} # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' @@ -54,7 +47,6 @@ data: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt insecure_skip_verify: true bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - kubernetes_sd_configs: - role: node relabel_configs: @@ -153,6 +145,27 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + + {{- if .Values.scrapeConfigs }} + {{- toYaml .Values.scrapeConfigs | trim | nindent 4 }} + {{- end }} + + {{- if (or .Values.alertManagers .Values.alertRelabelConfigs) }} + alerting: + alert_relabel_configs: + {{- if 
.Values.alertRelabelConfigs }} + {{- toYaml .Values.alertRelabelConfigs | trim | nindent 6 }} + {{- end }} + alertmanagers: + {{- if .Values.alertManagers }} + {{- toYaml .Values.alertManagers | trim | nindent 6 }} + {{- end }} + {{- end }} + + {{- if .Values.remoteWrite }} + remote_write: + {{- toYaml .Values.remoteWrite | trim | nindent 4 }} + {{- end }} --- kind: Service apiVersion: v1 @@ -191,7 +204,7 @@ metadata: namespace: {{.Values.global.namespace}} spec: replicas: 1 - {{- if .Values.prometheusPersistence.enabled }} + {{- if .Values.persistence }} strategy: type: Recreate {{- end }} @@ -219,14 +232,10 @@ spec: fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level={{lower .Values.prometheusLogLevel}} - {{- range $key, $value := .Values.prometheusExtraArgs}} + {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} - image: {{.Values.prometheusImage}} + image: {{.Values.image}} imagePullPolicy: {{.Values.global.imagePullPolicy}} livenessProbe: httpGet: @@ -244,15 +253,15 @@ spec: port: 9090 initialDelaySeconds: 30 timeoutSeconds: 30 - {{- if .Values.prometheusResources -}} - {{- include "partials.resources" .Values.prometheusResources | nindent 8 }} + {{- if .Values.resources -}} + {{- include "partials.resources" .Values.resources | nindent 8 }} {{- end }} securityContext: runAsNonRoot: true runAsUser: 65534 runAsGroup: 65534 volumeMounts: - {{- range .Values.prometheusRuleConfigMapMounts }} + {{- range .Values.ruleConfigMapMounts }} - name: {{ .name }} mountPath: /etc/prometheus/{{ .subPath }} subPath: {{ .subPath }} @@ -264,11 +273,13 @@ spec: name: prometheus-config subPath: prometheus.yml readOnly: true - {{- $tree := deepCopy . }} - {{- if not (empty .Values.prometheusProxyResources) }} - {{- $r := merge .Values.prometheusProxyResources .Values.global.proxy.resources }} + {{- $tree := deepCopy . }} + {{- if not (empty .Values.proxy) }} + {{- if not (empty .Values.proxy.resources) }} + {{- $r := merge .Values.proxy.resources .Values.global.proxy.resources }} {{- $_ := set $tree.Values.global.proxy "resources" $r }} {{- end }} + {{- end }} - {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }} {{ if not .Values.global.cniEnabled -}} initContainers: @@ -276,13 +287,13 @@ spec: {{ end -}} serviceAccountName: linkerd-prometheus volumes: - {{- range .Values.prometheusRuleConfigMapMounts }} + {{- range .Values.ruleConfigMapMounts }} - name: {{ .name }} configMap: name: {{ .configMap }} {{- end }} - name: data - {{- if .Values.prometheusPersistence.enabled }} + {{- if .Values.persistence }} persistentVolumeClaim: claimName: linkerd-prometheus {{- else }} @@ -298,7 +309,7 @@ spec: - {{- include "partials.proxyInit.volumes.xtables" . | indent 8 | trimPrefix (repeat 7 " ") }} {{ end -}} - {{- include "partials.proxy.volumes.identity" . 
| indent 8 | trimPrefix (repeat 7 " ") }} -{{- if .Values.prometheusPersistence.enabled }} +{{- if .Values.persistence }} --- kind: PersistentVolumeClaim apiVersion: v1 @@ -312,11 +323,11 @@ metadata: namespace: {{.Values.global.namespace}} spec: accessModes: - - {{ .Values.prometheusPersistence.accessMode | quote }} + - {{ .Values.persistence.accessMode | quote }} resources: requests: - storage: {{ .Values.prometheusPersistence.size | quote }} -{{- if .Values.prometheusPersistence.storageClass }} - storageClassName: "{{ .Values.prometheusPersistence.storageClass }}" + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} + storageClassName: "{{ .Values.persistence.storageClass }}" {{- end }} {{- end }} diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml new file mode 100644 index 0000000000000..5a91f49381c8b --- /dev/null +++ b/charts/add-ons/prometheus/values.yaml @@ -0,0 +1,15 @@ +# This add-on's default property values are declared in `charts/add-ons/prometheus/values.yaml`. +# If installing/upgrading with Helm, you can override them through the usual `--set` or `-f` flags +# when installing with the parent linkerd2 chart +# Do not override them in this file! +# If installing/upgrading with linkerd's CLI, use the `--addon-config` flag. +image: prom/prometheus:v2.15.2 +args: + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + config.file: /etc/prometheus/prometheus.yml + log.level: info +globalConfig: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s \ No newline at end of file diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md index a5c0bc1cf685a..58ee8e5e4a34b 100644 --- a/charts/linkerd2/README.md +++ b/charts/linkerd2/README.md @@ -154,17 +154,6 @@ their default values. | `identityPoxyResources` | CPU and Memory resources required by proxy injected into identity pod (see `global.proxy.resources` for sub-fields) | values in `global.proxy.resources` | | `installNamespace` | Set to false when installing Linkerd in a custom namespace. See the [Linkerd documentation](https://linkerd.io/2/tasks/install-helm/#customizing-the-namespace) for more information. | `true` | | `omitWebhookSideEffects` | Omit the `sideEffects` flag in the webhook manifests | `false` | -| `prometheusAlertmanagers` | Alertmanager instances the Prometheus server sends alerts to configured via the static_configs parameter. | `[]` | -| `prometheusExtraArgs` | Extra command line options for Prometheus | `{}` | -| `prometheusImage` | Docker image for the Prometheus container | `prom/prometheus:v2.15.2` | -| `prometheusLogLevel` | Log level for Prometheus | `info` | -| `prometheusResources` | CPU and Memory resources required by prometheus (see `global.proxy.resources` for sub-fields) | | -| `prometheusProxyResources` | CPU and Memory resources required by proxy injected into prometheus pod (see `global.proxy.resources` for sub-fields) | values in `global.proxy.resources` | -| `prometheusPersistence.enabled` | Boolean value to enable creation and mounting of PVC for prometheus data. | `false` | -| `prometheusPersistence.storageClass` | Storage class used to create prometheus data PV. | `nil` | -| `prometheusPersistence.accessMode` | PVC access mode. | `ReadWriteOnce` | -| `prometheusPersistence.size` | Prometheus data volume size. 
| `8Gi` | -| `prometheusRuleConfigMapMounts` | Alerting/recording rule ConfigMap mounts (sub-path names must end in `_rules.yml` or `_rules.yaml`) | `[]` | | `proxyInjector.externalSecret` | Do not create a secret resource for the profileValidator webhook. If this is set to `true`, the value `proxyInjector.caBundle` must be set (see below). | false | | `proxyInjector.crtPEM` | Certificate for the proxy injector. If not provided then Helm will generate one. | | | `proxyInjector.keyPEM` | Certificate key for the proxy injector. If not provided then Helm will generate one. | | @@ -218,6 +207,32 @@ The following table lists the configurable parameters for the Grafana Add-On. | `grafana.resources.memory.request` | Amount of memory that the grafana container requests || | `grafana.proxy.resources` | Structure analog to the `resources` fields above, but overriding the resources of the linkerd proxy injected into the grafana pod. | values in `global.proxy.resources` of the linkerd2 chart. | +### Prometheus Add-On + +The following table lists the configurable parameters for the Prometheus Add-On. + +| Parameter | Description | Default | +|:--------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------| +| `prometheus.enabled` | Flag to enable the prometheus instance to be installed | `true` | +| `prometheus.alertRelabelConfigs` | Alert relabeling is applied to alerts before they are sent to the Alertmanager. | `[]` | +| `prometheus.alertManagers` | Alertmanager instances the Prometheus server sends alerts to, configured via the static_configs parameter. | `[]` | +| `prometheus.args` | Command line options for the Prometheus binary | `storage.tsdb.path: /data, storage.tsdb.retention.time: 6h, config.file: /etc/prometheus/prometheus.yml, log.level: *controller_log_level` | +| `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` | +| `prometheus.image` | Docker image for the prometheus instance | `prom/prometheus:v2.15.2` | +| `prometheus.proxy.resources` | CPU and Memory resources required by the proxy injected into the prometheus pod (see `global.proxy.resources` for sub-fields) | values in `global.proxy.resources` | +| `prometheus.persistence.storageClass` | Storage class used to create prometheus data PV. | `nil` | +| `prometheus.persistence.accessMode` | PVC access mode. | `ReadWriteOnce` | +| `prometheus.persistence.size` | Prometheus data volume size. | `8Gi` | +| `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use || +| `prometheus.resources.cpu.request` | Amount of CPU units that the prometheus container requests || +| `prometheus.resources.memory.limit` | Maximum amount of memory that the prometheus container can use || +| `prometheus.resources.memory.request` | Amount of memory that the prometheus container requests || +| `prometheus.ruleConfigMapMounts` | Alerting/recording rule ConfigMap mounts (sub-path names must end in `_rules.yml` or `_rules.yaml`) | `[]` | +| `prometheus.scrapeConfigs` | A scrape_config section specifies a set of targets and parameters describing how to scrape them. | `[]` |
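+For illustration, a minimal override file for this add-on might look like the sketch below (the file name and
+the specific values are examples only, not defaults). Such a file can be passed to Helm with `-f` when
+installing the parent linkerd2 chart, or to the CLI with `linkerd install --addon-config <file>`:
+
+```yaml
+# prometheus-overrides.yaml (example name): override file for the Prometheus add-on
+prometheus:
+  enabled: true
+  # these entries override the corresponding defaults in charts/add-ons/prometheus/values.yaml
+  args:
+    log.level: debug
+  globalConfig:
+    scrape_interval: 30s
+  # forward alerts to an existing Alertmanager
+  alertManagers:
+    - scheme: http
+      static_configs:
+        - targets:
+            - "alertmanager.linkerd.svc:9093"
+```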
+ +Most of the above configuration options match directly with the official Prometheus configuration, which can be found [here](https://prometheus.io/docs/prometheus/latest/configuration/configuration). + ### Tracing Add-On The following table lists the configurable parameters for the Tracing Add-On. diff --git a/charts/linkerd2/requirements.lock b/charts/linkerd2/requirements.lock index eafff356539c8..ee33f734d5496 100644 --- a/charts/linkerd2/requirements.lock +++ b/charts/linkerd2/requirements.lock @@ -2,11 +2,14 @@ dependencies: - name: partials repository: file://../partials version: 0.1.0 +- name: prometheus + repository: file://../add-ons/prometheus + version: 0.1.0 - name: grafana repository: file://../add-ons/grafana version: 0.1.0 - name: tracing repository: file://../add-ons/tracing version: 0.1.0 -digest: sha256:f92907b6d243e3b57b4288603ba76eced7c2f4ef913e76505c314971bb4afa21 -generated: "2020-05-11T14:13:54.306010536-05:00" +digest: sha256:d2428770ae7d5134c5af6521c78a4c5f95da4c75f21bdea0f74fad6ab6e2e044 +generated: "2020-06-24T11:07:53.924602129Z" diff --git a/charts/linkerd2/requirements.yaml b/charts/linkerd2/requirements.yaml index 26c5c62d520af..5f2d1aabd5c7d 100644 --- a/charts/linkerd2/requirements.yaml +++ b/charts/linkerd2/requirements.yaml @@ -2,6 +2,10 @@ dependencies: - name: partials version: 0.1.0 repository: file://../partials +- name: prometheus + version: 0.1.0 + repository: file://../add-ons/prometheus + condition: prometheus.enabled - name: grafana version: 0.1.0 repository: file://../add-ons/grafana diff --git a/charts/linkerd2/templates/controller.yaml b/charts/linkerd2/templates/controller.yaml index e13e77c98a6ae..c84905c3157e4 100644 --- a/charts/linkerd2/templates/controller.yaml +++ b/charts/linkerd2/templates/controller.yaml @@ -72,10 +72,12 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090 - -destination-addr=linkerd-dst.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:8086 - -controller-namespace={{.Values.global.namespace}} - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} + {{- if .Values.prometheus.enabled }} + - -prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090 + {{- end}} {{- include "partials.linkerd.trace" . | nindent 8 -}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} diff --git a/charts/linkerd2/templates/destination.yaml b/charts/linkerd2/templates/destination.yaml index 9f1d7d7a22106..f418318ccc796 100644 --- a/charts/linkerd2/templates/destination.yaml +++ b/charts/linkerd2/templates/destination.yaml @@ -75,7 +75,7 @@ spec: - -addr=:8086 - -controller-namespace={{.Values.global.namespace}} - -enable-h2-upgrade={{.Values.enableH2Upgrade}} - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} {{- include "partials.linkerd.trace" .
| nindent 8 -}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} diff --git a/charts/linkerd2/templates/heartbeat.yaml b/charts/linkerd2/templates/heartbeat.yaml index 91b268ff21404..995605ef7b1a5 100644 --- a/charts/linkerd2/templates/heartbeat.yaml +++ b/charts/linkerd2/templates/heartbeat.yaml @@ -42,9 +42,11 @@ spec: imagePullPolicy: {{.Values.global.imagePullPolicy}} args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090" - "-controller-namespace={{.Values.global.namespace}}" - - "-log-level={{.Values.controllerLogLevel}}" + - "-log-level={{.Values.global.controllerLogLevel}}" + {{- if .Values.prometheus.enabled }} + - "-prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090" + {{- end}} {{- if .Values.heartbeatResources -}} {{- include "partials.resources" .Values.heartbeatResources | nindent 12 }} {{- end }} diff --git a/charts/linkerd2/templates/identity.yaml b/charts/linkerd2/templates/identity.yaml index 8510d9d5ebdb7..38e4f1719cacf 100644 --- a/charts/linkerd2/templates/identity.yaml +++ b/charts/linkerd2/templates/identity.yaml @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} {{- include "partials.linkerd.trace" . | nindent 8 -}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} diff --git a/charts/linkerd2/templates/linkerd-config-addons.yaml b/charts/linkerd2/templates/linkerd-config-addons.yaml index 50b5847caaaef..e5adf78de8b76 100644 --- a/charts/linkerd2/templates/linkerd-config-addons.yaml +++ b/charts/linkerd2/templates/linkerd-config-addons.yaml @@ -24,5 +24,7 @@ data: grafanaUrl: "{{.Values.global.grafanaUrl}}" grafana: {{- include "linkerd.addons.sanitize-config" .Values.grafana}} + prometheus: + {{- include "linkerd.addons.sanitize-config" .Values.prometheus}} tracing: {{- include "linkerd.addons.sanitize-config" .Values.tracing}} diff --git a/charts/linkerd2/templates/proxy-injector.yaml b/charts/linkerd2/templates/proxy-injector.yaml index 0cd93dd8d967e..606769375d23b 100644 --- a/charts/linkerd2/templates/proxy-injector.yaml +++ b/charts/linkerd2/templates/proxy-injector.yaml @@ -54,7 +54,7 @@ spec: containers: - args: - proxy-injector - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} livenessProbe: diff --git a/charts/linkerd2/templates/psp.yaml b/charts/linkerd2/templates/psp.yaml index a64fc11a1b2cc..31162dc0f09b3 100644 --- a/charts/linkerd2/templates/psp.yaml +++ b/charts/linkerd2/templates/psp.yaml @@ -103,9 +103,11 @@ subjects: - kind: ServiceAccount name: linkerd-identity namespace: {{.Values.global.namespace}} +{{ if .Values.prometheus.enabled -}} - kind: ServiceAccount name: linkerd-prometheus namespace: {{.Values.global.namespace}} +{{ end -}} - kind: ServiceAccount name: linkerd-proxy-injector namespace: {{.Values.global.namespace}} diff --git a/charts/linkerd2/templates/sp-validator.yaml b/charts/linkerd2/templates/sp-validator.yaml index 09010d80207dd..208fee7a90127 
100644 --- a/charts/linkerd2/templates/sp-validator.yaml +++ b/charts/linkerd2/templates/sp-validator.yaml @@ -73,7 +73,7 @@ spec: containers: - args: - sp-validator - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} livenessProbe: diff --git a/charts/linkerd2/templates/tap.yaml b/charts/linkerd2/templates/tap.yaml index a93a329dc1236..176fd27902ada 100644 --- a/charts/linkerd2/templates/tap.yaml +++ b/charts/linkerd2/templates/tap.yaml @@ -79,7 +79,7 @@ spec: - args: - tap - -controller-namespace={{.Values.global.namespace}} - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} {{- include "partials.linkerd.trace" . | nindent 8 -}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} diff --git a/charts/linkerd2/templates/web.yaml b/charts/linkerd2/templates/web.yaml index 0a49b5ef8d127..01b2c05d6eccb 100644 --- a/charts/linkerd2/templates/web.yaml +++ b/charts/linkerd2/templates/web.yaml @@ -75,7 +75,7 @@ spec: - -jaeger-addr={{.Values.tracing.jaeger.name}}.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:16686 {{- end}} - -controller-namespace={{.Values.global.namespace}} - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} {{- if .Values.enforcedHostRegexp }} - -enforced-host={{.Values.enforcedHostRegexp}} {{- else -}} diff --git a/charts/linkerd2/values-ha.yaml b/charts/linkerd2/values-ha.yaml index d1b92885d8574..3a616492a476a 100644 --- a/charts/linkerd2/values-ha.yaml +++ b/charts/linkerd2/values-ha.yaml @@ -46,13 +46,14 @@ grafana: heartbeatResources: *controller_resources # prometheus configuration -prometheusResources: - cpu: - limit: "4" - request: 300m - memory: - limit: 8192Mi - request: 300Mi +prometheus: + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi # proxy injector configuration proxyInjectorResources: *controller_resources diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index e8dfcd887bd29..96f5734ef5883 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -6,6 +6,7 @@ global: clusterDomain: &cluster_domain cluster.local imagePullPolicy: &image_pull_policy IfNotPresent + controllerLogLevel: &controller_log_level info # control plane trace configuration controlPlaneTracing: false @@ -96,7 +97,6 @@ webhookFailurePolicy: Ignore # controller configuration controllerImage: gcr.io/linkerd-io/controller -controllerLogLevel: &controller_log_level info controllerReplicas: 1 controllerUID: 2103 @@ -150,37 +150,6 @@ identity: disableHeartBeat: false heartbeatSchedule: "0 0 * * *" -# prometheus configuration -prometheusImage: prom/prometheus:v2.15.2 -prometheusLogLevel: *controller_log_level -# set resources for prometheus and prometheus linkerd proxy respectively -# see global.proxy.resources for details. 
-#prometheusResources: -#prometheusProxyResources: -prometheusExtraArgs: {} - # log.format: json -prometheusAlertmanagers: [] - # - scheme: http - # static_configs: - # - targets: - # - "alertmanager.linkerd.svc:9093" -prometheusRuleConfigMapMounts: [] - # - name: alerting-rules - # subPath: alerting_rules.yml - # configMap: linkerd-prometheus-rules - # - name: recording-rules - # subPath: recording_rules.yml - # configMap: linkerd-prometheus-rules -prometheusPersistence: - ### WARNING: prometheusPersistence is experimental and has not been tested/vetted by the Linkerd team. - ### As such, please refer to https://linkerd.io/2/tasks/exporting-metrics/ for the recommended approach to metrics data retention. - # if enabled, creates a persistent volume claim for prometheus data - # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims - enabled: false - storageClass: "" - accessMode: ReadWriteOnce - size: 8Gi - # proxy injector configuration proxyInjector: externalSecret: false @@ -276,6 +245,7 @@ smiMetrics: #smiMetricsProxyResources: # Configuration for Add-ons +# Full configuration fields https://github.com/linkerd/linkerd2/tree/master/charts/linkerd2#add-ons-configuration grafana: enabled: true @@ -288,6 +258,53 @@ grafana: # proxy: # resources: +prometheus: + enabled: true + # image: prom/prometheus:v2.15.3 + # args: + # storage.tsdb.retention.time: 6h + # log.level: debug + # globalConfig: + # scrape_interval: 10s + # scrape_timeout: 10s + # scrapeConfigs: + # - job_name: 'kubernetes-nodes' + # scheme: https + # tls_config: + # ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # kubernetes_sd_configs: + # - role: node + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # alertManagers: + # - scheme: http + # static_configs: + # - targets: + # - "alertmanager.linkerd.svc:9093" + # alertRelabelConfigs: + # - action: labeldrop + # regex: prometheus_replica + # ruleConfigMapMounts: + # - name: alerting-rules + # subPath: alerting_rules.yml + # configMap: linkerd-prometheus-rules + # - name: recording-rules + # subPath: recording_rules.yml + # configMap: linkerd-prometheus-rules + ### WARNING: persistence is experimental and has not been tested/vetted by the Linkerd team. + ### As such, please refer to https://linkerd.io/2/tasks/exporting-metrics/ for the recommended approach to metrics data retention. 
+ # if enabled, creates a persistent volume claim for prometheus data + # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + # persistence: + # storageClass: "" + # accessMode: ReadWriteOnce + # size: 8Gi + # resources: + # proxy: + # resources: + tracing: enabled: false collector: diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 948b7d394fba5..a104a90cc204a 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -43,7 +43,6 @@ type ( controlPlaneVersion string controllerReplicas uint controllerLogLevel string - prometheusImage string highAvailability bool controllerUID int64 disableH2Upgrade bool @@ -128,7 +127,6 @@ var ( "templates/web-rbac.yaml", "templates/serviceprofile-crd.yaml", "templates/trafficsplit-crd.yaml", - "templates/prometheus-rbac.yaml", "templates/proxy-injector-rbac.yaml", "templates/sp-validator-rbac.yaml", "templates/tap-rbac.yaml", @@ -145,7 +143,6 @@ var ( "templates/destination.yaml", "templates/heartbeat.yaml", "templates/web.yaml", - "templates/prometheus.yaml", "templates/proxy-injector.yaml", "templates/sp-validator.yaml", "templates/tap.yaml", @@ -181,8 +178,7 @@ func newInstallOptionsWithDefaults() (*installOptions, error) { clusterDomain: defaults.Global.ClusterDomain, controlPlaneVersion: version.Version, controllerReplicas: defaults.ControllerReplicas, - controllerLogLevel: defaults.ControllerLogLevel, - prometheusImage: defaults.PrometheusImage, + controllerLogLevel: defaults.Global.ControllerLogLevel, highAvailability: defaults.Global.HighAvailability, controllerUID: defaults.ControllerUID, disableH2Upgrade: !defaults.EnableH2Upgrade, @@ -460,11 +456,6 @@ func (options *installOptions) recordableFlagSet() *pflag.FlagSet { "Log level for the controller and web components", ) - flags.StringVar( - &options.prometheusImage, "prometheus-image", options.prometheusImage, - "Custom Prometheus image name", - ) - flags.BoolVar( &options.highAvailability, "ha", options.highAvailability, "Enable HA deployment config for the control plane (default false)", @@ -689,10 +680,6 @@ func (options *installOptions) validate() error { return fmt.Errorf("--controller-log-level must be one of: panic, fatal, error, warn, info, debug") } - if options.prometheusImage != "" && !alphaNumDashDotSlashColonUnderscore.MatchString(options.prometheusImage) { - return fmt.Errorf("%s is not a valid prometheus image", options.prometheusImage) - } - if err := options.proxyConfigOptions.validate(); err != nil { return err } @@ -766,7 +753,7 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Configs.Install = installJSON installValues.ControllerImage = fmt.Sprintf("%s/controller", options.dockerRegistry) installValues.Global.ControllerImageVersion = configs.GetGlobal().GetVersion() - installValues.ControllerLogLevel = options.controllerLogLevel + installValues.Global.ControllerLogLevel = options.controllerLogLevel installValues.ControllerReplicas = options.controllerReplicas installValues.ControllerUID = options.controllerUID installValues.Global.ControlPlaneTracing = options.controlPlaneTracing @@ -775,13 +762,9 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Global.HighAvailability = options.highAvailability installValues.Global.ImagePullPolicy = options.imagePullPolicy installValues.Grafana["image"].(map[string]interface{})["name"] = fmt.Sprintf("%s/grafana", options.dockerRegistry) - if options.prometheusImage != "" { - 
installValues.PrometheusImage = options.prometheusImage - } installValues.Global.Namespace = controlPlaneNamespace installValues.Global.CNIEnabled = options.cniEnabled installValues.OmitWebhookSideEffects = options.omitWebhookSideEffects - installValues.PrometheusLogLevel = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) installValues.HeartbeatSchedule = options.heartbeatSchedule() installValues.RestrictDashboardPrivileges = options.restrictDashboardPrivileges installValues.DisableHeartBeat = options.disableHeartbeat @@ -836,15 +819,6 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d return installValues, nil } -func toPromLogLevel(level string) string { - switch level { - case "panic", "fatal": - return "error" - default: - return level - } -} - func render(w io.Writer, values *l5dcharts.Values) error { // Render raw values and create chart config rawValues, err := yaml.Marshal(values) diff --git a/cli/cmd/install_addon_test.go b/cli/cmd/install_addon_test.go index 6d4d1e6c7785b..0056fb1c5614c 100644 --- a/cli/cmd/install_addon_test.go +++ b/cli/cmd/install_addon_test.go @@ -37,6 +37,14 @@ func TestAddOnRender(t *testing.T) { withExistingGrafanaValues, _, _ := withExistingGrafana.validateAndBuild("", nil) addFakeTLSSecrets(withExistingGrafanaValues) + withPrometheusAddOnOverwrite, err := testInstallOptions() + if err != nil { + t.Fatalf("Unexpected error: %v\n", err) + } + withPrometheusAddOnOverwrite.addOnConfig = filepath.Join("testdata", "prom-config.yaml") + withPrometheusAddOnOverwriteValues, _, _ := withPrometheusAddOnOverwrite.validateAndBuild("", nil) + addFakeTLSSecrets(withPrometheusAddOnOverwriteValues) + testCases := []struct { values *charts.Values goldenFileName string @@ -44,6 +52,7 @@ func TestAddOnRender(t *testing.T) { {withTracingAddonValues, "install_tracing.golden"}, {withTracingOverwriteValues, "install_tracing_overwrite.golden"}, {withExistingGrafanaValues, "install_grafana_existing.golden"}, + {withPrometheusAddOnOverwriteValues, "install_prometheus_overwrite.golden"}, } for i, tc := range testCases { @@ -62,7 +71,7 @@ func TestMergeRaw(t *testing.T) { t.Run("Test Ovewriting of Values struct", func(*testing.T) { initialValues := charts.Values{ - PrometheusImage: "initial-prometheus", + WebImage: "initial-web", EnableH2Upgrade: true, ControllerReplicas: 1, OmitWebhookSideEffects: false, @@ -74,14 +83,14 @@ func TestMergeRaw(t *testing.T) { // partially by using omitempty, but then we don't have relevant checks in helm templates as they would // be nil when omitempty is present. 
rawOverwriteValues := ` -prometheusImage: override-prometheus +webImage: override-web enableH2Upgrade: false controllerReplicas: 2 omitWebhookSideEffects: true enablePodAntiAffinity: true` expectedValues := charts.Values{ - PrometheusImage: "override-prometheus", + WebImage: "override-web", EnableH2Upgrade: false, ControllerReplicas: 2, OmitWebhookSideEffects: true, diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index 25df4de2396d7..9bd5e7330758e 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/linkerd/linkerd2/controller/gen/config" - pb "github.com/linkerd/linkerd2/controller/gen/config" charts "github.com/linkerd/linkerd2/pkg/charts/linkerd2" ) @@ -55,9 +54,6 @@ func TestRender(t *testing.T) { metaValues := &charts.Values{ ControllerImage: "ControllerImage", WebImage: "WebImage", - PrometheusImage: "PrometheusImage", - ControllerLogLevel: "ControllerLogLevel", - PrometheusLogLevel: "PrometheusLogLevel", ControllerUID: 2103, EnableH2Upgrade: true, WebhookFailurePolicy: "WebhookFailurePolicy", @@ -73,6 +69,7 @@ func TestRender(t *testing.T) { ImagePullPolicy: "ImagePullPolicy", CliVersion: "CliVersion", ControllerComponentLabel: "ControllerComponentLabel", + ControllerLogLevel: "ControllerLogLevel", ControllerImageVersion: "ControllerImageVersion", ControllerNamespaceLabel: "ControllerNamespaceLabel", WorkloadNamespaceLabel: "WorkloadNamespaceLabel", @@ -137,6 +134,10 @@ func TestRender(t *testing.T) { Dashboard: &charts.Dashboard{ Replicas: 1, }, + Prometheus: charts.Prometheus{ + "enabled": true, + "image": "PrometheusImage", + }, Tracing: map[string]interface{}{ "enabled": false, }, @@ -374,31 +375,6 @@ func TestValidate(t *testing.T) { } }) - t.Run("Ensure log level input is converted to lower case before passing to prometheus", func(t *testing.T) { - underTest, err := testInstallOptions() - if err != nil { - t.Fatalf("Unexpected error: %v\n", err) - } - - underTest.controllerLogLevel = "DEBUG" - expected := "debug" - - testValues := new(pb.All) - testValues.Global = new(pb.Global) - testValues.Proxy = new(pb.Proxy) - testValues.Install = new(pb.Install) - - actual, err := underTest.buildValuesWithoutIdentity(testValues) - - if err != nil { - t.Fatalf("Unexpected error occurred %s", err) - } - - if actual.PrometheusLogLevel != expected { - t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.PrometheusLogLevel) - } - }) - t.Run("Properly validates proxy log level", func(t *testing.T) { testCases := []struct { input string diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 21a9e48fd3391..f42d580e26886 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -55,10 +55,9 @@ var ( // These regexs are not as strict as they could be, but are a quick and dirty // sanity check against illegal characters. 
- alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) - alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`) - alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`) - alphaNumDashDotSlashColonUnderscore = regexp.MustCompile(`^[\./a-zA-Z0-9-:_]+$`) + alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) + alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`) + alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`) // Full Rust log level syntax at // https://docs.rs/env_logger/0.6.0/env_logger/#enabling-logging diff --git a/cli/cmd/testdata/install_addon_config.golden b/cli/cmd/testdata/install_addon_config.golden index 8c4946b5b51a6..8cab005064070 100644 --- a/cli/cmd/testdata/install_addon_config.golden +++ b/cli/cmd/testdata/install_addon_config.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -833,40 +792,81 @@ subjects: --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-collector RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd --- ### -### Grafana RBAC +### linkerd-jaeger RBAC ### 
--- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 238b575a31fa1..b9d924389b492 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -320,10 +320,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -752,9 +752,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -989,192 +989,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: 
pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1182,49 +1017,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1257,8 +1085,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1356,232 +1182,25 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - 
initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: 
FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd + name: linkerd-proxy-injector + namespace: linkerd labels: linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd @@ -2076,6 +1695,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2086,121 +1707,538 @@ data: name: linkerd-jaeger --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap -metadata: - name: linkerd-collector-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: linkerd-collector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- apiVersion: v1 -kind: Service metadata: - name: linkerd-collector + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 - selector: - linkerd.io/control-plane-component: linkerd-collector ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: linkerd-collector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-collector +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + 
datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: 
linkerd-grafana-config + name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2232,6 +2270,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2329,14 +2369,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - name: data + emptyDir: {} - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2344,28 +2383,59 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -2373,20 +2443,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -2397,22 +2469,35 @@ spec: 
prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2541,9 +2626,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2551,87 +2641,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -2639,65 +2670,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: 
install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2826,20 +2838,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_config.golden b/cli/cmd/testdata/install_config.golden index c44c7b5a1f997..de8c4b1566372 100644 --- a/cli/cmd/testdata/install_config.golden +++ b/cli/cmd/testdata/install_config.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: 
-- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -844,3 +803,44 @@ metadata: labels: linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index df20c70591bd5..9d1a58d1be4f6 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -320,10 +320,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -752,9 +752,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -988,192 +988,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli 
dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1181,49 +1016,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1256,8 +1084,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1355,232 +1181,25 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - 
annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- 
-kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd + name: linkerd-proxy-injector + namespace: linkerd labels: linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd @@ -2075,6 +1694,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -2373,3 +1994,383 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # 
__meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: 
LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + 
name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 43001cb2634f8..71f5624ac4152 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1168,10 +1127,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1630,9 +1589,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1881,192 +1840,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: 
['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: 
__tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2074,265 +1868,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: 
LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH - value: /var/run/linkerd/podinfo/labels - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_ADDR - value: linkerd-collector.linkerd.svc.cluster.local:55678 - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_NAME - value: linkerd-collector.linkerd.serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: var/run/linkerd/podinfo - name: podinfo - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - 
securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.labels - path: "labels" - name: podinfo - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -3025,6 +2589,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3350,3 +2916,438 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH + value: /var/run/linkerd/podinfo/labels + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_ADDR + value: linkerd-collector.linkerd.svc.cluster.local:55678 + - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_NAME + value: linkerd-collector.linkerd.serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: var/run/linkerd/podinfo + name: podinfo + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - 
NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: "labels" + name: podinfo + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 1f630f41f1377..c30bd495089a8 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name 
- action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - 
regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: my.custom.registry/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock 
- - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: my.custom.registry/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep 
+ regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: 
__tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: my.custom.registry/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock 
+ - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index d4aa70bf31642..ccc43399d846f 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - 
target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + 
app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 752cdd4b3e19a..c0b4b05636fc9 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 
- - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: 
linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.0.0.0/8" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.0.0.0/8" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index abb5e6116cea3..e670004a528c4 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1150,10 +1109,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1582,9 +1541,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1818,183 +1777,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: 
'(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2002,52 +1805,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG value: warn,linkerd=info - name: LINKERD2_PROXY_LOG_FORMAT value: plain @@ -2077,8 +1873,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2176,41 +1970,80 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### 
Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2218,40 +2051,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2383,80 +2214,63 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined 
spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2464,41 +2278,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2627,63 +2446,272 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + global: + grafanaUrl: "somegrafana.xyz" + grafana: + enabled: false + prometheus: + enabled: true + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding 
+apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: 
pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2691,46 +2719,50 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: 
IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2762,6 +2794,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2859,39 +2893,15 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - name: data + emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "somegrafana.xyz" - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 2c4bd2cc6fccb..47dbde63add54 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1212,10 +1171,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - 
-destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1693,9 +1652,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" @@ -1949,192 +1908,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] 
- action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2142,265 +1939,33 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - 
name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 
4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - args: - proxy-injector @@ -3164,6 +2729,15 @@ data: memory: limit: 1024Mi request: 50Mi + prometheus: + enabled: 
true + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi tracing: enabled: false --- @@ -3488,3 +3062,437 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: 
linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - 
--storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + 
httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 8416ddbd5875d..5e776a4aa27d3 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1212,10 +1171,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1693,9 +1652,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - 
"-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" @@ -1949,192 +1908,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - 
target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2142,265 +1939,33 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: 
/data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "400m" - memory: "300Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - 
initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 2 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - args: - proxy-injector @@ -3164,6 +2729,15 @@ data: memory: limit: 1024Mi request: 50Mi + prometheus: + enabled: true + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi tracing: enabled: false --- @@ -3488,3 +3062,437 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep 
+ regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + 
limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "400m" + memory: "300Mi" + securityContext: + allowPrivilegeEscalation: false + 
readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index c5ccc76bbfb26..52b8d33dd080d 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -342,47 +342,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1109,10 +1068,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1732,192 +1691,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - 
/etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` 
prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1925,253 +1719,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - 
name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - 
serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2819,6 +2397,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3130,3 +2710,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + 
static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: 
__tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 131a7c267aa1e..0dd74f0ec393d 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -398,49 +398,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1238,10 +1195,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1656,9 +1613,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1884,438 +1841,64 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: 
node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: 
prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - 
namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 + failureThreshold: 7 httpGet: path: /ready port: 9995 @@ -2951,6 +2534,18 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -3257,3 +2852,419 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + 
tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + null + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - 443,111 + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 44c193e711012..2aa4555e5c5d6 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -398,49 +398,6 @@ spec: description: The apex service of this split. 
JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1238,10 +1195,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1656,9 +1613,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1885,438 +1842,64 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: 
[__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - 
namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 + failureThreshold: 7 httpGet: path: /ready port: 9995 @@ -2952,6 +2535,18 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2961,90 +2556,105 @@ data: image: jaegertracing/all-in-one:1.17.1 name: linkerd-jaeger --- -# Source: linkerd2/charts/tracing/templates/tracing-rbac.yaml ---- -### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: linkerd-collector - linkerd.io/control-plane-ns: linkerd +# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml --- ### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- -# Source: linkerd2/charts/tracing/templates/tracing.yaml +# Source: linkerd2/charts/grafana/templates/grafana.yaml --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- -apiVersion: v1 -kind: Service -metadata: - name: linkerd-collector - namespace: linkerd + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + 
datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -3052,61 +2662,65 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + null containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + 
readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3226,43 +2840,526 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 
'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + 
selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + null + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - 443,111 + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/tracing/templates/tracing-rbac.yaml +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/tracing/templates/tracing.yaml +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3270,20 +3367,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3294,22 +3393,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3429,114 +3541,43 @@ spec: volumeMounts: - mountPath: /run name: 
linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml ---- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -# Source: linkerd2/charts/grafana/templates/grafana.yaml ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3544,65 +3585,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - null containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3722,20 +3744,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 1459f10cbfba5..5b6c31b715fba 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -398,49 +398,6 @@ spec: description: The apex service of this split. 
JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1297,10 +1254,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1764,9 +1721,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" @@ -2012,455 +1969,68 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - 
regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: 
proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - args: - proxy-injector @@ -3207,6 +2777,25 @@ data: memory: limit: 1024Mi request: 50Mi + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi tracing: enabled: false --- @@ -3526,3 +3115,432 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + 
namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + null + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: 
LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - 443,111 + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index a68a13822d7fb..dcad55129f6db 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ 
b/cli/cmd/testdata/install_no_init_container.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1112,10 +1071,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1468,9 +1427,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1666,192 +1625,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: 
[__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector 
linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1859,213 +1653,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - 
linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2601,6 +2217,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -2874,3 +2492,386 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] 
+ regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + 
replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 6c9f594bacbff..9aea2f39040fd 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus 
- labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-Namespace-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: Namespace ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: Namespace - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace ---- -### ### Proxy Injector RBAC ### --- @@ -1152,10 +1111,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - -destination-addr=linkerd-dst.Namespace.svc.cluster.local:8086 - -controller-namespace=Namespace - -log-level=ControllerLogLevel + - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1582,9 +1541,9 @@ spec: imagePullPolicy: ImagePullPolicy args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" - "-controller-namespace=Namespace" - "-log-level=ControllerLogLevel" + - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1817,192 +1776,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: Namespace - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['Namespace'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['Namespace'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - 
relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;Namespace$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: Namespace - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -spec: - type: ClusterIP - selector: - ControllerComponentLabel: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: prometheus + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-prometheus + ControllerComponentLabel: proxy-injector template: metadata: annotations: @@ -2010,250 +1804,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: prometheus + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: 
linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=prometheusloglevel - image: PrometheusImage + - proxy-injector + - -log-level=ControllerLogLevel + image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.Namespace.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "DestinationGetNetworks" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.Namespace.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: Namespace - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: ProxyImageName:ProxyVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: ProxyInitImageName:ProxyInitVersion - imagePullPolicy: ImagePullPolicy - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: 
linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - CreatedByAnnotation: CliVersion - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector - namespace: Namespace -spec: - replicas: 1 - selector: - matchLabels: - ControllerComponentLabel: proxy-injector - template: - metadata: - annotations: - CreatedByAnnotation: CliVersion - linkerd.io/identity-mode: default - linkerd.io/proxy-version: ProxyVersion - labels: - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2900,6 +2479,9 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true + image: PrometheusImage tracing: enabled: false --- @@ -3210,3 +2792,423 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-Namespace-prometheus + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-Namespace-prometheus + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-Namespace-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: Namespace +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: Namespace + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: Namespace + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['Namespace'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['Namespace'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;Namespace$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: Namespace + labels: + ControllerComponentLabel: prometheus + 
ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +spec: + type: ClusterIP + selector: + ControllerComponentLabel: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + CreatedByAnnotation: CliVersion + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: ControllerImageVersion + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace + name: linkerd-prometheus + namespace: Namespace +spec: + replicas: 1 + selector: + matchLabels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + CreatedByAnnotation: CliVersion + linkerd.io/identity-mode: default + linkerd.io/proxy-version: ProxyVersion + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace + WorkloadNamespaceLabel: Namespace + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: PrometheusImage + imagePullPolicy: ImagePullPolicy + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.Namespace.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "DestinationGetNetworks" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.Namespace.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: Namespace + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: ProxyImageName:ProxyVersion + imagePullPolicy: ImagePullPolicy + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: ProxyInitImageName:ProxyInitVersion + imagePullPolicy: ImagePullPolicy + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: 
linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden new file mode 100644 index 0000000000000..1bea2dc660776 --- /dev/null +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -0,0 +1,3295 @@ +--- +### +### Linkerd Namespace +### +--- +kind: Namespace +apiVersion: v1 +metadata: + name: linkerd + annotations: + linkerd.io/inject: disabled + labels: + linkerd.io/is-control-plane: "true" + config.linkerd.io/admission-webhooks: disabled + linkerd.io/control-plane-ns: linkerd +--- +### +### Identity Controller Service RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-identity +subjects: +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +--- +### +### Controller RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list" , "get", "watch"] +- apiGroups: [""] + resources: ["pods", "endpoints", "services", "replicationcontrollers", "namespaces"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-controller +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-controller + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +--- +### +### Destination Controller Service +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["list", "get", "watch"] 
+- apiGroups: [""] + resources: ["pods", "endpoints", "services"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +- apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-destination +subjects: +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-destination + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +--- +### +### Heartbeat RBAC +### +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["linkerd-config"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-heartbeat + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-component: heartbeat + linkerd.io/control-plane-ns: linkerd +--- +### +### Web RBAC +### +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["linkerd-config"] +- apiGroups: [""] + resources: ["namespaces", "configmaps"] + verbs: ["get"] +- apiGroups: [""] + resources: ["serviceaccounts", "pods"] + verbs: ["list"] +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-web + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: linkerd-linkerd-web-check + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "clusterrolebindings"] + verbs: ["list"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["list"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["list"] +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: ["list"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list"] +- apiGroups: 
["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: linkerd-linkerd-web-check + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: ClusterRole + name: linkerd-linkerd-web-check + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-web-admin + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap-admin +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +--- +### +### Service Profile CRD +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serviceprofiles.linkerd.io + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: linkerd.io + versions: + - name: v1alpha1 + served: true + storage: false + - name: v1alpha2 + served: true + storage: true + scope: Namespaced + names: + plural: serviceprofiles + singular: serviceprofile + kind: ServiceProfile + shortNames: + - sp +--- +### +### TrafficSplit CRD +### Copied from https://github.com/deislabs/smi-sdk-go/blob/cea7e1e9372304bbb6c74a3f6ca788d9eaa9cc58/crds/split.yaml +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: trafficsplits.split.smi-spec.io + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: split.smi-spec.io + version: v1alpha1 + scope: Namespaced + names: + kind: TrafficSplit + shortNames: + - ts + plural: trafficsplits + singular: trafficsplit + additionalPrinterColumns: + - name: Service + type: string + description: The apex service of this split. 
+ JSONPath: .spec.service +--- +### +### Proxy Injector RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["namespaces", "replicationcontrollers"] + verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets", "daemonsets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-proxy-injector + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-proxy-injector-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +type: Opaque +data: + crt.pem: cHJveHkgaW5qZWN0b3IgY3J0 + key.pem: cHJveHkgaW5qZWN0b3Iga2V5 +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: linkerd-proxy-injector-webhook-config + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-proxy-injector.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-proxy-injector + namespace: linkerd + path: "/" + caBundle: cHJveHkgaW5qZWN0b3IgQ0EgYnVuZGxl + failurePolicy: Ignore + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + sideEffects: None +--- +### +### Service Profile Validator RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-sp-validator + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-sp-validator-tls + 
namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +type: Opaque +data: + crt.pem: cHJvZmlsZSB2YWxpZGF0b3IgY3J0 + key.pem: cHJvZmlsZSB2YWxpZGF0b3Iga2V5 +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: linkerd-sp-validator-webhook-config + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-sp-validator.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-sp-validator + namespace: linkerd + path: "/" + caBundle: cHJvZmlsZSB2YWxpZGF0b3IgQ0EgYnVuZGxl + failurePolicy: Ignore + rules: + - operations: [ "CREATE" , "UPDATE" ] + apiGroups: ["linkerd.io"] + apiVersions: ["v1alpha1", "v1alpha2"] + resources: ["serviceprofiles"] + sideEffects: None +--- +### +### Tap RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods", "services", "replicationcontrollers", "namespaces", "nodes"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list" , "get", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap-admin + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["tap.linkerd.io"] + resources: ["*"] + verbs: ["watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: linkerd-linkerd-tap-auth-delegator + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-linkerd-tap-auth-reader + namespace: kube-system + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-tap-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli 
dev-undefined +type: Opaque +data: + crt.pem: dGFwIGNydA== + key.pem: dGFwIGtleQ== +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1alpha1.tap.linkerd.io + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +spec: + group: tap.linkerd.io + version: v1alpha1 + groupPriorityMinimum: 1000 + versionPriority: 100 + service: + name: linkerd-tap + namespace: linkerd + caBundle: dGFwIENBIGJ1bmRsZQ== +--- +### +### Control Plane PSP +### +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: linkerd-linkerd-control-plane + labels: + linkerd.io/control-plane-ns: linkerd +spec: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + allowedCapabilities: + - NET_ADMIN + - NET_RAW + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + volumes: + - configMap + - emptyDir + - secret + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ['policy', 'extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - linkerd-linkerd-control-plane +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +- kind: ServiceAccount + name: linkerd-grafana + namespace: linkerd +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + global: | + {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN CERTIFICATE-----\nMIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy\nLmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE\nAxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0\nxtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364\n6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF\nBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE\nAiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv\nOLO4Zsk1XrGZHGsmyiEyvYF9lpY=\n-----END 
CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s","scheme":"linkerd.io/tls"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} + proxy: | + {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version","destinationGetNetworks":"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16","logFormat":"plain"} + install: | + {"cliVersion":"dev-undefined","flags":[]} +--- +### +### Identity Controller Service +### +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-identity-issuer + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-issuer-expiry: 2029-02-28T02:03:52Z +data: + crt.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjakNDQVJpZ0F3SUJBZ0lCQWpBS0JnZ3Foa2pPUFFRREFqQVlNUll3RkFZRFZRUURFdzFqYkhWemRHVnkKTG14dlkyRnNNQjRYRFRFNU1ETXdNekF4TlRrMU1sb1hEVEk1TURJeU9EQXlNRE0xTWxvd0tURW5NQ1VHQTFVRQpBeE1lYVdSbGJuUnBkSGt1YkdsdWEyVnlaQzVqYkhWemRHVnlMbXh2WTJGc01Ga3dFd1lIS29aSXpqMENBUVlJCktvWkl6ajBEQVFjRFFnQUVJU2cwQ21KTkJXTHhKVHNLdDcrYno4QXMxWWZxWkZ1VHEyRm5ZbzAxNk5LVnY3MGUKUUMzVDZ0T3Bhajl4dUtzWGZsVTZaa3VpVlJpaWh3K3RWMmlzcTZOQ01FQXdEZ1lEVlIwUEFRSC9CQVFEQWdFRwpNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBUEJnTlZIUk1CQWY4RUJUQURBUUgvCk1Bb0dDQ3FHU000OUJBTUNBMGdBTUVVQ0lGK2FNMEJ3MlBkTUZEcS9LdGFCUXZIZEFZYVVQVng4dmYzam4rTTQKQWFENEFpRUE5SEJkanlXeWlLZUt4bEE4Q29PdlVBd0k5NXhjNlhVTW9EeFJTWGpucFhnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + key.pem: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU1JSnltZWtZeitra0NMUGtGbHJVeUF1L2NISllSVHl3Zm1BVVJLS1JYZHpvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFSVNnMENtSk5CV0x4SlRzS3Q3K2J6OEFzMVlmcVpGdVRxMkZuWW8wMTZOS1Z2NzBlUUMzVAo2dE9wYWo5eHVLc1hmbFU2Wmt1aVZSaWlodyt0VjJpc3F3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQ== +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: identity + ports: + - name: grpc + port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: identity + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + name: linkerd-identity + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli 
dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - identity + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9990 + initialDelaySeconds: 10 + name: identity + ports: + - containerPort: 8080 + name: grpc + - containerPort: 9990 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9990 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/identity/issuer + name: identity-issuer + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: localhost.:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: 
gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-identity + volumes: + - configMap: + name: linkerd-config + name: config + - name: identity-issuer + secret: + secretName: linkerd-identity-issuer + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Controller +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-controller-api + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: controller + ports: + - name: http + port: 8085 + targetPort: 8085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + name: linkerd-controller + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - public-api + - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 + - -controller-namespace=linkerd + - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: public-api + ports: + - 
containerPort: 8085 + name: http + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - 
args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-controller + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Destination Controller Service +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-dst + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: destination + ports: + - name: grpc + port: 8086 + targetPort: 8086 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: destination + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + name: linkerd-destination + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - destination + - -addr=:8086 + - -controller-namespace=linkerd + - -enable-h2-upgrade=true + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9996 + initialDelaySeconds: 10 + name: destination + ports: + - containerPort: 8086 + name: grpc + - containerPort: 9996 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9996 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: localhost.:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 
+ - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-destination + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: {} + name: 
linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Heartbeat +### +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + app.kubernetes.io/name: heartbeat + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: heartbeat + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + schedule: "1 2 3 4 5" + successfulJobsHistoryLimit: 0 + jobTemplate: + spec: + template: + metadata: + labels: + linkerd.io/control-plane-component: heartbeat + linkerd.io/workload-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + spec: + nodeSelector: + beta.kubernetes.io/os: linux + serviceAccountName: linkerd-heartbeat + restartPolicy: Never + containers: + - name: heartbeat + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + args: + - "heartbeat" + - "-controller-namespace=linkerd" + - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" + securityContext: + runAsUser: 2103 +--- +### +### Web +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: web + ports: + - name: http + port: 8084 + targetPort: 8084 + - name: admin-http + port: 9994 + targetPort: 9994 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: web + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + name: linkerd-web + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-web + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-web + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 + - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 + - -controller-namespace=linkerd + - -log-level=info + - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ + image: gcr.io/linkerd-io/web:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9994 + initialDelaySeconds: 10 + name: web + ports: + - containerPort: 8084 + name: http + - containerPort: 9994 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9994 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: 
LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: 
"50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-web + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Proxy Injector +### +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: proxy-injector + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + name: linkerd-proxy-injector + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: proxy-injector + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-proxy-injector + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector + ports: + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-proxy-injector + volumes: + - configMap: + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- 
+kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +### +### Service Profile Validator +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: sp-validator + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-sp-validator + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - sp-validator + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9997 + initialDelaySeconds: 10 + name: sp-validator + ports: + - containerPort: 8443 + name: sp-validator + - containerPort: 9997 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9997 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-sp-validator + volumes: + - name: tls + secret: + secretName: linkerd-sp-validator-tls + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Tap +### +--- +kind: Service +apiVersion: v1 
+metadata: + name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: tap + ports: + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver + port: 443 + targetPort: apiserver +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: tap + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + name: linkerd-tap + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - tap + - -controller-namespace=linkerd + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9998 + initialDelaySeconds: 10 + name: tap + ports: + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9998 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-tap + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + +--- +### +### linkerd 
add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + global: + grafanaUrl: "" + grafana: + enabled: true + image: + name: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + alertManagers: + - scheme: http + static_configs: + - targets: + - alertmanager.linkerd.svc:9093 + alertRelabelConfigs: + - action: labeldrop + regex: prometheus_replica + args: + log.format: json + enabled: true + globalConfig: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + image: linkedin.io/prom + remoteWrite: + - url: http://cortex-service.default:9009/api/prom/push + ruleConfigMapMounts: + - configMap: linkerd-prometheus-rules + name: alerting-rules + subPath: alerting_rules.yml + - configMap: linkerd-prometheus-rules + name: recording-rules + subPath: recording_rules.yml + scrapeConfigs: + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + 
matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: 
linkerd-grafana-config + name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - 
source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + alerting: + alert_relabel_configs: + - action: labeldrop + regex: prometheus_replica + alertmanagers: + - scheme: http + static_configs: + - targets: + - alertmanager.linkerd.svc:9093 + remote_write: + - url: http://cortex-service.default:9009/api/prom/push +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: 
+ annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.format=json + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: linkedin.io/prom + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - name: alerting-rules + mountPath: /etc/prometheus/alerting_rules.yml + subPath: alerting_rules.yml + readOnly: true + - name: recording-rules + mountPath: /etc/prometheus/recording_rules.yml + subPath: recording_rules.yml + readOnly: true + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: alerting-rules + configMap: + name: linkerd-prometheus-rules + - name: recording-rules + configMap: + name: linkerd-prometheus-rules + - name: data + emptyDir: {} + - 
configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 4165525d8a7aa..9630c809fb31a 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,22,8100-8102 - - --outbound-ports-to-ignore - - 443,5432 - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # 
Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: 
Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,22,8100-8102 + - --outbound-ports-to-ignore + - 443,5432 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - 
emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 11ac4cb673edf..6bb6875b94aae 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -315,47 +315,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1085,10 +1044,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1517,9 +1476,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1753,192 +1712,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - 
source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli 
dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1946,251 +1740,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2840,6 +2418,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3151,3 +2731,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 49752f19f90db..f37bd33c2b4fe 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1822,192 +1781,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: 
__metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + 
app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2015,251 +1809,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2909,6 +2487,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2919,85 +2499,100 @@ data: name: linkerd-jaeger --- ### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: linkerd-collector - linkerd.io/control-plane-ns: linkerd ---- -### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- -apiVersion: v1 -kind: Service + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + 
access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -3005,61 +2600,65 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true - env: - 
name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3188,14 +2787,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3203,28 +2808,506 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + 
namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: 
linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3232,20 +3315,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3256,22 +3341,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - 
containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3400,9 +3498,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3410,100 +3513,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3511,65 +3542,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger 
template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3698,20 +3710,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 3e93e9794c043..e8117eae4c9ec 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - 
-destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1822,192 +1781,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: 
[__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2015,251 +1809,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - 
port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 
4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2909,6 +2487,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: overwrite-collector-image @@ -2919,85 +2499,100 @@ data: name: linkerd-jaeger --- ### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: overwrite-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: overwrite-collector - linkerd.io/control-plane-ns: linkerd ---- -### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana 
namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: overwrite-collector-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- -apiVersion: v1 -kind: Service + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 metadata: - name: overwrite-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -3005,61 +2600,65 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: overwrite-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: overwrite-collector + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - 
prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: overwrite-collector-image + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: overwrite-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3162,6 +2761,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3186,14 +2787,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3201,28 +2808,506 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config 
+ - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: overwrite-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: overwrite-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: overwrite-collector --- apiVersion: apps/v1 kind: Deployment @@ -3230,20 +3315,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: overwrite-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3254,22 +3341,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: overwrite-collector-image imagePullPolicy: IfNotPresent - name: 
jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: overwrite-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3372,8 +3472,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3398,9 +3496,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3408,100 +3511,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3509,65 +3540,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: 
linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3696,20 +3708,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/prom-config.yaml b/cli/cmd/testdata/prom-config.yaml new file mode 100644 index 0000000000000..cc4257c57cf24 --- /dev/null +++ b/cli/cmd/testdata/prom-config.yaml @@ -0,0 +1,37 @@ +prometheus: + image: linkedin.io/prom + args: + log.format: json + globalConfig: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + + scrapeConfigs: + - job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + alertManagers: + - scheme: http + static_configs: + - targets: + - "alertmanager.linkerd.svc:9093" + alertRelabelConfigs: + - action: labeldrop + regex: prometheus_replica + ruleConfigMapMounts: + - name: alerting-rules + subPath: alerting_rules.yml + configMap: linkerd-prometheus-rules + - name: recording-rules + subPath: recording_rules.yml + configMap: linkerd-prometheus-rules + remoteWrite: + - url: 
http://cortex-service.default:9009/api/prom/push diff --git a/cli/cmd/testdata/upgrade_add-on_config.golden b/cli/cmd/testdata/upgrade_add-on_config.golden index 159dbd65b9cc2..1347b8c5ffe24 100644 --- a/cli/cmd/testdata/upgrade_add-on_config.golden +++ b/cli/cmd/testdata/upgrade_add-on_config.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -833,40 +792,81 @@ subjects: --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-collector RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd --- ### -### Grafana RBAC +### linkerd-jaeger RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 48b163b80ef42..1f750fc78d177 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ 
b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -322,10 +322,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -758,9 +758,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -997,192 +997,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - 
__meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1190,49 +1025,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 
+ - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1265,8 +1093,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1366,234 +1192,25 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: 
plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - 
imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd + name: linkerd-proxy-injector + namespace: linkerd labels: linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd @@ -2092,6 +1709,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2102,121 +1721,540 @@ data: name: linkerd-jaeger --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap -metadata: - name: linkerd-collector-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: linkerd-collector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- apiVersion: v1 -kind: Service metadata: - name: linkerd-collector + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 - selector: - linkerd.io/control-plane-component: linkerd-collector ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: linkerd-collector +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: 
linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + 
path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: 
k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - 
containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2248,6 +2286,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2347,14 +2387,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - name: data + emptyDir: {} - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2362,28 +2401,59 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -2391,20 +2461,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger 
+ linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -2415,22 +2487,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2561,9 +2646,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2571,87 +2661,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -2659,65 +2690,46 @@ metadata: annotations: linkerd.io/created-by: 
linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2848,20 +2860,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index ff27f59304184..14f6eb13f0d48 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus 
- labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1830,192 +1789,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - 
relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2023,253 +1817,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2925,6 +2501,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: overwrite-collector-image @@ -2937,85 +2515,100 @@ data: resources: null --- ### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: overwrite-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: overwrite-collector - linkerd.io/control-plane-ns: linkerd ---- -### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: overwrite-collector-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- -apiVersion: v1 + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html 
= true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service +apiVersion: v1 metadata: - name: overwrite-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -3023,61 +2616,65 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: overwrite-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: overwrite-collector + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: overwrite-collector-image + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: overwrite-collector-config-val 
+ - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3182,6 +2779,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3206,14 +2805,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3221,28 +2826,508 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: 
/api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + 
app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: 
linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: overwrite-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: overwrite-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: overwrite-collector --- apiVersion: apps/v1 kind: Deployment @@ -3250,20 +3335,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: overwrite-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3274,22 +3361,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: 
overwrite-collector-image imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: overwrite-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3394,8 +3494,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3420,9 +3518,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3430,100 +3533,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3531,65 +3562,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger 
linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3720,20 +3732,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index d86402b23c9c4..5924df7872173 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: 
v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1830,192 +1789,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - 
job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2023,253 +1817,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - 
--storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - 
name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2925,6 +2501,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: collector: image: 
omnition/opencensus-collector:0.1.11 @@ -2935,85 +2513,100 @@ data: name: linkerd-jaeger --- ### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: linkerd-collector - linkerd.io/control-plane-ns: linkerd ---- -### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- -apiVersion: v1 + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -3021,61 +2614,65 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + 
linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - env: + - name: GF_PATHS_DATA + value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3206,14 +2803,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3221,28 +2824,508 @@ spec: name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + 
labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo 
=> + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: 
LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + 
readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3250,20 +3333,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3274,22 +3359,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + 
linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3420,9 +3518,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3430,100 +3533,28 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3531,65 +3562,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3720,20 +3732,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index b571781bba24f..8e534f9484f45 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - 
name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: 
[__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: 
beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - 
configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: 
['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - 
configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index faba0ef392143..1e91baaed0d4e 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1141,10 +1100,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1577,9 +1536,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1815,192 +1774,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2008,253 +1802,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2910,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3223,3 +2801,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + 
relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: 
linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index d9bf6fa7b5824..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + 
relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: 
linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index 97ccc21721ead..915a6b4d2fb91 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1152,10 +1111,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1588,9 +1547,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1825,183 +1784,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: 
/api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector 
app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2009,52 +1812,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG value: warn,linkerd=info - name: LINKERD2_PROXY_LOG_FORMAT value: plain @@ -2084,8 +1880,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2185,41 +1979,80 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: 
proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2227,40 +2060,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2394,80 +2225,63 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: 
tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2475,41 +2289,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2640,63 +2459,272 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + global: + grafanaUrl: "" + grafana: + enabled: false + prometheus: + enabled: true + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", 
"nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: 
namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2704,46 +2732,50 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - 
--storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2775,6 +2807,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2874,39 +2908,15 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - name: data + emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "" - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index d9bf6fa7b5824..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - 
-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: 
^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: 
IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: 
gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep 
+ regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + 
securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 
2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index 97ccc21721ead..915a6b4d2fb91 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1152,10 +1111,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1588,9 +1547,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1825,183 +1784,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - 
linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - 
replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2009,52 +1812,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG value: warn,linkerd=info - name: LINKERD2_PROXY_LOG_FORMAT value: plain @@ -2084,8 +1880,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: 
"10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2185,41 +1979,80 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - name: data - emptyDir: {} - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2227,40 +2060,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2394,80 +2225,63 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: 
linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2475,41 +2289,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2640,63 +2459,272 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - 
name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + global: + grafanaUrl: "" + grafana: + enabled: false + prometheus: + enabled: true + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - 
__meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: 
matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2704,46 +2732,50 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2775,6 +2807,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2874,39 +2908,15 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - name: data + emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "" - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 1ce4f6f76e4db..79fde0daebe41 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - 
namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: linkerd-image-overwrite name: linkerd-grafana-overwrite + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] 
+ relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: 
linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 75290f6e4d349..8db5a3ce11686 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1214,10 +1173,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1699,9 +1658,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" @@ -1957,192 +1916,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: 
kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: 
linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2150,267 +1947,33 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: 
linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - args: - proxy-injector @@ -3180,6 +2743,15 @@ data: memory: limit: 1024Mi request: 50Mi + prometheus: + enabled: true + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi tracing: enabled: false --- @@ -3506,3 +3078,439 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: 
prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + 
regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: 
LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + 
memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_ha_config.golden b/cli/cmd/testdata/upgrade_ha_config.golden index f9caa1e7f7794..046f8a2dc47ed 100644 --- a/cli/cmd/testdata/upgrade_ha_config.golden +++ b/cli/cmd/testdata/upgrade_ha_config.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -844,3 +803,44 @@ metadata: labels: linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 812c0b1ead6dd..f9d8c2fe1be26 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd 
-rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name 
- action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: 
metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - 
configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: 
['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - 
configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index d9bf6fa7b5824..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + 
relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: 
linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 46b8243d3baa8..0bbe6270b4efe 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index a597f64f221a4..2a39cad3afb1f 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1139,10 +1098,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1571,9 +1530,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1807,192 +1766,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - 
replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2000,251 +1794,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2894,6 +2472,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3205,3 +2785,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 46b8243d3baa8..0bbe6270b4efe 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1153,10 +1112,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1585,9 +1544,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1821,192 +1780,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: 
[__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined 
labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2014,251 +1808,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2908,6 +2486,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3219,3 +2799,424 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: 
https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 69abd15afb838..c3055f4b133ae 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -383,47 +383,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1155,10 +1114,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1591,9 +1550,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- @@ -1829,192 +1788,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: 
[__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined 
labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2022,253 +1816,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - 
configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 + path: /ready + port: 9995 securityContext: runAsUser: 2103 volumeMounts: @@ -2924,6 +2500,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3237,3 +2815,426 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: 
['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + securityContext: + fsGroup: 65534 + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock + serviceAccountName: linkerd-prometheus + volumes: + - name: data + emptyDir: {} + - 
configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/pkg/charts/linkerd2/addons.go b/pkg/charts/linkerd2/addons.go index 07b4877627bf9..797531e502da5 100644 --- a/pkg/charts/linkerd2/addons.go +++ b/pkg/charts/linkerd2/addons.go @@ -19,14 +19,6 @@ type AddOn interface { func ParseAddOnValues(values *Values) ([]AddOn, error) { var addOns []AddOn - if values.Tracing != nil { - if enabled, ok := values.Tracing["enabled"].(bool); !ok { - return nil, fmt.Errorf("invalid value for 'tracing.enabled' (should be boolean): %s", values.Tracing["enabled"]) - } else if enabled { - addOns = append(addOns, values.Tracing) - } - } - if values.Grafana != nil { if enabled, ok := values.Grafana["enabled"].(bool); !ok { return nil, fmt.Errorf("invalid value for 'grafana.enabled' (should be boolean): %s", values.Grafana["enabled"]) @@ -35,5 +27,21 @@ func ParseAddOnValues(values *Values) ([]AddOn, error) { } } + if values.Prometheus != nil { + if enabled, ok := values.Prometheus["enabled"].(bool); !ok { + return nil, fmt.Errorf("invalid value for 'prometheus.enabled' (should be boolean): %s", values.Prometheus["enabled"]) + } else if enabled { + addOns = append(addOns, values.Prometheus) + } + } + + if values.Tracing != nil { + if enabled, ok := values.Tracing["enabled"].(bool); !ok { + return nil, fmt.Errorf("invalid value for 'tracing.enabled' (should be boolean): %s", values.Tracing["enabled"]) + } else if enabled { + addOns = append(addOns, values.Tracing) + } + } + return addOns, nil } diff --git a/pkg/charts/linkerd2/prometheus.go b/pkg/charts/linkerd2/prometheus.go new file mode 100644 index 0000000000000..807aeadb17f38 --- /dev/null +++ b/pkg/charts/linkerd2/prometheus.go @@ -0,0 +1,41 @@ +package linkerd2 + +import ( + "k8s.io/helm/pkg/chartutil" + "sigs.k8s.io/yaml" +) + +var ( + prometheusAddOn = "prometheus" +) + +// Prometheus is an add-on that installs the prometheus component +type Prometheus map[string]interface{} + +// Name returns the name of the Prometheus add-on +func (p Prometheus) Name() string { + return prometheusAddOn +} + +// Values returns the configuration values that were assigned for this add-on +func (p Prometheus) Values() []byte { + values, err := yaml.Marshal(p) + if err != nil { + return nil + } + return values +} + +// ConfigStageTemplates returns the template files that are part of the config stage +func (p Prometheus) ConfigStageTemplates() []*chartutil.BufferedFile { + return []*chartutil.BufferedFile{ + {Name: "templates/prometheus-rbac.yaml"}, + } +} + +// ControlPlaneStageTemplates returns the template files that are part of the Control Plane Stage.
+func (p Prometheus) ControlPlaneStageTemplates() []*chartutil.BufferedFile { + return []*chartutil.BufferedFile{ + {Name: "templates/prometheus.yaml"}, + } +} diff --git a/pkg/charts/linkerd2/values.go b/pkg/charts/linkerd2/values.go index d8dfc27f2588c..3b370139de865 100644 --- a/pkg/charts/linkerd2/values.go +++ b/pkg/charts/linkerd2/values.go @@ -19,42 +19,35 @@ const ( type ( // Values contains the top-level elements in the Helm charts Values struct { - Stage string `json:"stage"` - ControllerImage string `json:"controllerImage"` - WebImage string `json:"webImage"` - PrometheusImage string `json:"prometheusImage"` - ControllerReplicas uint `json:"controllerReplicas"` - ControllerLogLevel string `json:"controllerLogLevel"` - PrometheusLogLevel string `json:"prometheusLogLevel"` - PrometheusExtraArgs map[string]string `json:"prometheusExtraArgs"` - PrometheusAlertmanagers []interface{} `json:"prometheusAlertmanagers"` - PrometheusRuleConfigMapMounts []PrometheusRuleConfigMapMount `json:"prometheusRuleConfigMapMounts"` - PrometheusPersistence Persistence `json:"prometheusPersistence"` - ControllerUID int64 `json:"controllerUID"` - EnableH2Upgrade bool `json:"enableH2Upgrade"` - EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` - WebhookFailurePolicy string `json:"webhookFailurePolicy"` - OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` - RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` - DisableHeartBeat bool `json:"disableHeartBeat"` - HeartbeatSchedule string `json:"heartbeatSchedule"` - InstallNamespace bool `json:"installNamespace"` - Configs ConfigJSONs `json:"configs"` - Global *Global `json:"global"` - Identity *Identity `json:"identity"` - Dashboard *Dashboard `json:"dashboard"` - DebugContainer *DebugContainer `json:"debugContainer"` - ProxyInjector *ProxyInjector `json:"proxyInjector"` - ProfileValidator *ProfileValidator `json:"profileValidator"` - Tap *Tap `json:"tap"` - NodeSelector map[string]string `json:"nodeSelector"` - Tolerations []interface{} `json:"tolerations"` - SMIMetrics *SMIMetrics `json:"smiMetrics"` + Stage string `json:"stage"` + ControllerImage string `json:"controllerImage"` + ControllerImageVersion string `json:"controllerImageVersion"` + WebImage string `json:"webImage"` + ControllerReplicas uint `json:"controllerReplicas"` + ControllerUID int64 `json:"controllerUID"` + EnableH2Upgrade bool `json:"enableH2Upgrade"` + EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` + WebhookFailurePolicy string `json:"webhookFailurePolicy"` + OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` + RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` + DisableHeartBeat bool `json:"disableHeartBeat"` + HeartbeatSchedule string `json:"heartbeatSchedule"` + InstallNamespace bool `json:"installNamespace"` + Configs ConfigJSONs `json:"configs"` + Global *Global `json:"global"` + Identity *Identity `json:"identity"` + Dashboard *Dashboard `json:"dashboard"` + DebugContainer *DebugContainer `json:"debugContainer"` + ProxyInjector *ProxyInjector `json:"proxyInjector"` + ProfileValidator *ProfileValidator `json:"profileValidator"` + Tap *Tap `json:"tap"` + NodeSelector map[string]string `json:"nodeSelector"` + Tolerations []interface{} `json:"tolerations"` + SMIMetrics *SMIMetrics `json:"smiMetrics"` DestinationResources *Resources `json:"destinationResources"` HeartbeatResources *Resources `json:"heartbeatResources"` IdentityResources *Resources `json:"identityResources"` - PrometheusResources *Resources 
`json:"prometheusResources"` ProxyInjectorResources *Resources `json:"proxyInjectorResources"` PublicAPIResources *Resources `json:"publicAPIResources"` SMIMetricsResources *Resources `json:"smiMetricsResources"` @@ -64,7 +57,6 @@ type ( DestinationProxyResources *Resources `json:"destinationProxyResources"` IdentityProxyResources *Resources `json:"identityProxyResources"` - PrometheusProxyResources *Resources `json:"prometheusProxyResources"` ProxyInjectorProxyResources *Resources `json:"proxyInjectorProxyResources"` PublicAPIProxyResources *Resources `json:"publicAPIProxyResources"` SMIMetricsProxyResources *Resources `json:"smiMetricsProxyResources"` @@ -73,8 +65,9 @@ type ( WebProxyResources *Resources `json:"webProxyResources"` // Addon Structures - Tracing Tracing `json:"tracing"` - Grafana Grafana `json:"grafana"` + Grafana Grafana `json:"grafana"` + Prometheus Prometheus `json:"prometheus"` + Tracing Tracing `json:"tracing"` } // Global values common across all charts @@ -85,6 +78,7 @@ type ( CliVersion string `json:"cliVersion"` ControllerComponentLabel string `json:"controllerComponentLabel"` ControllerImageVersion string `json:"controllerImageVersion"` + ControllerLogLevel string `json:"controllerLogLevel"` ControllerNamespaceLabel string `json:"controllerNamespaceLabel"` WorkloadNamespaceLabel string `json:"workloadNamespaceLabel"` CreatedByAnnotation string `json:"createdByAnnotation"` @@ -210,21 +204,6 @@ type ( TLS *IssuerTLS `json:"tls"` } - // Persistence represents PVC configuration. - Persistence struct { - Enabled bool `json:"enabled"` - StorageClass string `json:"storageClass"` - AccessMode string `json:"accessMode"` - Size string `json:"size"` - } - - // PrometheusRuleConfigMapMount is a user supplied prometheus rule config map. 
- PrometheusRuleConfigMapMount struct { - Name string `json:"name"` - SubPath string `json:"subPath"` - ConfigMap string `json:"configMap"` - } - // ProxyInjector has all the proxy injector's Helm variables ProxyInjector struct { *TLS diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index c093ed4a65820..081fba28bd799 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -14,32 +14,29 @@ func TestNewValues(t *testing.T) { testVersion := "linkerd-dev" expected := &Values{ - Stage: "", - ControllerImage: "gcr.io/linkerd-io/controller", - WebImage: "gcr.io/linkerd-io/web", - PrometheusImage: "prom/prometheus:v2.15.2", - ControllerReplicas: 1, - ControllerLogLevel: "info", - PrometheusLogLevel: "info", - PrometheusExtraArgs: map[string]string{}, - PrometheusAlertmanagers: []interface{}{}, - PrometheusRuleConfigMapMounts: []PrometheusRuleConfigMapMount{}, - PrometheusPersistence: Persistence{false, "", "ReadWriteOnce", "8Gi"}, - ControllerUID: 2103, - EnableH2Upgrade: true, - EnablePodAntiAffinity: false, - WebhookFailurePolicy: "Ignore", - OmitWebhookSideEffects: false, - RestrictDashboardPrivileges: false, - DisableHeartBeat: false, - HeartbeatSchedule: "0 0 * * *", - InstallNamespace: true, + Stage: "", + ControllerImage: "gcr.io/linkerd-io/controller", + WebImage: "gcr.io/linkerd-io/web", + ControllerReplicas: 1, + ControllerUID: 2103, + EnableH2Upgrade: true, + EnablePodAntiAffinity: false, + WebhookFailurePolicy: "Ignore", + OmitWebhookSideEffects: false, + RestrictDashboardPrivileges: false, + DisableHeartBeat: false, + HeartbeatSchedule: "0 0 * * *", + InstallNamespace: true, + Prometheus: Prometheus{ + "enabled": true, + }, Global: &Global{ Namespace: "linkerd", ClusterDomain: "cluster.local", ImagePullPolicy: "IfNotPresent", CliVersion: "linkerd/cli dev-undefined", ControllerComponentLabel: "linkerd.io/control-plane-component", + ControllerLogLevel: "info", ControllerImageVersion: testVersion, ControllerNamespaceLabel: "linkerd.io/control-plane-ns", WorkloadNamespaceLabel: "linkerd.io/workload-ns", @@ -164,11 +161,6 @@ func TestNewValues(t *testing.T) { t.Run("HA", func(t *testing.T) { actual, err := NewValues(true) - // workaround for mergo, which resets these to []interface{}(nil) - // and []PrometheusRuleConfigMapMount(nil) - actual.PrometheusAlertmanagers = []interface{}{} - actual.PrometheusRuleConfigMapMounts = []PrometheusRuleConfigMapMount{} - if err != nil { t.Fatalf("Unexpected error: %v\n", err) } @@ -224,14 +216,17 @@ func TestNewValues(t *testing.T) { }, } - expected.PrometheusResources = &Resources{ - CPU: Constraints{ - Limit: "4", - Request: "300m", - }, - Memory: Constraints{ - Limit: "8192Mi", - Request: "300Mi", + expected.Prometheus = Prometheus{ + "enabled": true, + "resources": map[string]interface{}{ + "cpu": map[string]interface{}{ + "limit": "4", + "request": "300m", + }, + "memory": map[string]interface{}{ + "limit": "8192Mi", + "request": "300Mi", + }, }, } diff --git a/pkg/healthcheck/healthcheck.go b/pkg/healthcheck/healthcheck.go index e76d2fe7b50a5..6165de7e29800 100644 --- a/pkg/healthcheck/healthcheck.go +++ b/pkg/healthcheck/healthcheck.go @@ -182,7 +182,6 @@ var ExpectedServiceAccountNames = []string{ "linkerd-controller", "linkerd-destination", "linkerd-identity", - "linkerd-prometheus", "linkerd-proxy-injector", "linkerd-sp-validator", "linkerd-web", diff --git a/test/integration/install_test.go b/test/integration/install_test.go index 837194b414f18..fbc5e97c417de 100644 --- 
a/test/integration/install_test.go +++ b/test/integration/install_test.go @@ -285,7 +285,7 @@ func TestInstallOrUpgradeCli(t *testing.T) { // These need to be updated (if there are changes) once a new stable is released func helmOverridesStable(root *tls.CA) []string { return []string{ - "--set", "controllerLogLevel=debug", + "--set", "global.controllerLogLevel=debug", "--set", "global.linkerdVersion=" + TestHelper.UpgradeHelmFromVersion(), "--set", "global.proxy.image.version=" + TestHelper.UpgradeHelmFromVersion(), "--set", "global.identityTrustDomain=cluster.local", @@ -299,7 +299,7 @@ func helmOverridesStable(root *tls.CA) []string { // These need to correspond to the flags in the current edge func helmOverridesEdge(root *tls.CA) []string { return []string{ - "--set", "controllerLogLevel=debug", + "--set", "global.controllerLogLevel=debug", "--set", "global.linkerdVersion=" + TestHelper.GetVersion(), "--set", "global.proxy.image.version=" + TestHelper.GetVersion(), "--set", "global.identityTrustDomain=cluster.local", @@ -417,8 +417,8 @@ func TestUpgradeHelm(t *testing.T) { "--set", "grafana.proxy.resources.memory.request=103Mi", "--set", "identityProxyResources.cpu.limit=1040m", "--set", "identityProxyResources.memory.request=104Mi", - "--set", "prometheusProxyResources.cpu.limit=1050m", - "--set", "prometheusProxyResources.memory.request=105Mi", + "--set", "prometheus.proxy.resources.cpu.limit=1050m", + "--set", "prometheus.proxy.resources.memory.request=105Mi", "--set", "proxyInjectorProxyResources.cpu.limit=1060m", "--set", "proxyInjectorProxyResources.memory.request=106Mi", "--set", "smiMetricsProxyResources.cpu.limit=1070m",