From 913ca77632356615ee67640ef67fdb038b29600b Mon Sep 17 00:00:00 2001
From: Tarun Pothulapati
Date: Wed, 6 May 2020 12:48:39 +0530
Subject: [PATCH 01/42] move prometheus templates as an add-on

Signed-off-by: Tarun Pothulapati
---
 charts/add-ons/prometheus/.helmignore         | 22 ++++
 charts/add-ons/prometheus/Chart.yaml          |  9 +++
 charts/add-ons/prometheus/requirements.yaml   |  5 ++
 .../templates/prometheus-rbac.yaml            | 10 +--
 .../prometheus}/templates/prometheus.yaml     | 30 ++++----
 charts/add-ons/prometheus/values.yaml         | 68 +++++++++++++++++++
 .../templates/linkerd-config-addons.yaml      |  2 +
 charts/linkerd2/values-ha.yaml                | 15 ++--
 charts/linkerd2/values.yaml                   | 39 ++++++-----
 cli/cmd/install.go                            |  4 +-
 cli/cmd/install_addon_test.go                 |  6 +-
 cli/cmd/install_test.go                       |  4 +-
 pkg/charts/linkerd2/addons.go                 |  8 +++
 pkg/charts/linkerd2/prometheus.go             | 41 +++++++++++
 pkg/charts/linkerd2/values.go                 | 60 ++++++++--------
 pkg/charts/linkerd2/values_test.go            | 68 +++++++++----------
 16 files changed, 274 insertions(+), 117 deletions(-)
 create mode 100644 charts/add-ons/prometheus/.helmignore
 create mode 100644 charts/add-ons/prometheus/Chart.yaml
 create mode 100644 charts/add-ons/prometheus/requirements.yaml
 rename charts/{linkerd2 => add-ons/prometheus}/templates/prometheus-rbac.yaml (81%)
 rename charts/{linkerd2 => add-ons/prometheus}/templates/prometheus.yaml (92%)
 create mode 100644 charts/add-ons/prometheus/values.yaml
 create mode 100644 pkg/charts/linkerd2/prometheus.go

diff --git a/charts/add-ons/prometheus/.helmignore b/charts/add-ons/prometheus/.helmignore
new file mode 100644
index 0000000000000..50af031725419
--- /dev/null
+++ b/charts/add-ons/prometheus/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/add-ons/prometheus/Chart.yaml b/charts/add-ons/prometheus/Chart.yaml new file mode 100644 index 0000000000000..ddcfc4531ffe9 --- /dev/null +++ b/charts/add-ons/prometheus/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for the prometheus add-on in Linkerd +name: prometheus +version: 0.1.0 +maintainers: + - name: Linkerd authors + email: cncf-linkerd-dev@lists.cncf.io + url: https://linkerd.io/ \ No newline at end of file diff --git a/charts/add-ons/prometheus/requirements.yaml b/charts/add-ons/prometheus/requirements.yaml new file mode 100644 index 0000000000000..8266e94ae195b --- /dev/null +++ b/charts/add-ons/prometheus/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: partials + version: 0.1.0 + repository: file://../../partials + \ No newline at end of file diff --git a/charts/linkerd2/templates/prometheus-rbac.yaml b/charts/add-ons/prometheus/templates/prometheus-rbac.yaml similarity index 81% rename from charts/linkerd2/templates/prometheus-rbac.yaml rename to charts/add-ons/prometheus/templates/prometheus-rbac.yaml index 9fa4c0d62dbeb..d3dd458ca0db4 100644 --- a/charts/linkerd2/templates/prometheus-rbac.yaml +++ b/charts/add-ons/prometheus/templates/prometheus-rbac.yaml @@ -6,7 +6,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-{{.Values.global.namespace}}-prometheus + name: {{.Values.global.namespace}}-{{.Values.name}} labels: {{.Values.global.controllerComponentLabel}}: prometheus {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}} @@ -18,23 +18,23 @@ rules: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-{{.Values.global.namespace}}-prometheus + name: {{.Values.global.namespace}}-{{.Values.name}} labels: {{.Values.global.controllerComponentLabel}}: prometheus {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: linkerd-{{.Values.global.namespace}}-prometheus + name: {{.Values.global.namespace}}-{{.Values.name}} subjects: - kind: ServiceAccount - name: linkerd-prometheus + name: {{.Values.name}} namespace: {{.Values.global.namespace}} --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-prometheus + name: {{.Values.name}} namespace: {{.Values.global.namespace}} labels: {{.Values.global.controllerComponentLabel}}: prometheus diff --git a/charts/linkerd2/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml similarity index 92% rename from charts/linkerd2/templates/prometheus.yaml rename to charts/add-ons/prometheus/templates/prometheus.yaml index d190d62e1d03b..289b9bbd8d0d2 100644 --- a/charts/linkerd2/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -6,7 +6,7 @@ kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: {{.Values.name}}-config namespace: {{.Values.global.namespace}} labels: {{.Values.global.controllerComponentLabel}}: prometheus @@ -15,10 +15,10 @@ metadata: {{.Values.global.createdByAnnotation}}: {{default (printf "linkerd/helm %s" .Values.global.linkerdVersion) .Values.global.cliVersion}} data: prometheus.yml: |- - {{- if .Values.prometheusAlertmanagers }} + {{- if .Values.alertmanagers }} alerting: 
       alertmanagers:
-        {{- toYaml .Values.prometheusAlertmanagers | trim | nindent 8 }}
+        {{- toYaml .Values.alertmanagers | trim | nindent 8 }}
     {{- end }}
     global:
       scrape_interval: 10s
@@ -155,7 +155,7 @@ data:
 kind: Service
 apiVersion: v1
 metadata:
-  name: linkerd-prometheus
+  name: {{.Values.name}}
   namespace: {{.Values.global.namespace}}
   labels:
     {{.Values.global.controllerComponentLabel}}: prometheus
@@ -175,7 +175,7 @@ spec:
 {{ $_ := set .Values.global.proxy.image "Version" .Values.global.linkerdVersion -}}
 {{ end -}}
 {{ $_ := set .Values.global.proxy "workloadKind" "deployment" -}}
-{{ $_ := set .Values.global.proxy "component" "linkerd-prometheus" -}}
+{{ $_ := set .Values.global.proxy "component" ".Values.name" -}}
 {{ include "linkerd.proxy.validation" .Values.global.proxy -}}
 apiVersion: apps/v1
 kind: Deployment
@@ -188,7 +188,7 @@ metadata:
     app.kubernetes.io/version: {{default .Values.global.linkerdVersion .Values.controllerImageVersion}}
     {{.Values.global.controllerComponentLabel}}: prometheus
     {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}}
-  name: linkerd-prometheus
+  name: {{.Values.name}}
   namespace: {{.Values.global.namespace}}
 spec:
   replicas: 1
@@ -214,11 +214,11 @@ spec:
         - --storage.tsdb.path=/data
         - --storage.tsdb.retention.time=6h
         - --config.file=/etc/prometheus/prometheus.yml
-        - --log.level={{lower .Values.prometheusLogLevel}}
-        {{- range $key, $value := .Values.prometheusExtraArgs}}
+        - --log.level={{lower .Values.prometheus.logLevel}}
+        {{- range $key, $value := .Values.args}}
         - --{{ $key }}{{ if $value }}={{ $value }}{{ end }}
         {{- end }}
-        image: {{.Values.prometheusImage}}
+        image: {{.Values.image}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
         livenessProbe:
           httpGet:
@@ -236,13 +236,13 @@ spec:
             port: 9090
           initialDelaySeconds: 30
           timeoutSeconds: 30
-        {{- if .Values.prometheusResources -}}
-        {{- include "partials.resources" .Values.prometheusResources | nindent 8 }}
+        {{- if .Values.resources -}}
+        {{- include "partials.resources" .Values.resources | nindent 8 }}
        {{- end }}
         securityContext:
           runAsUser: 65534
         volumeMounts:
-        {{- range .Values.prometheusRuleConfigMapMounts }}
+        {{- range .Values.ruleConfigMapMounts }}
         - name: {{ .name }}
           mountPath: /etc/prometheus/{{ .subPath }}
           subPath: {{ .subPath }}
@@ -259,9 +259,9 @@ spec:
       initContainers:
       - {{- include "partials.proxy-init" . | indent 8 | trimPrefix (repeat 7 " ") }}
       {{ end -}}
-      serviceAccountName: linkerd-prometheus
+      serviceAccountName: {{.Values.name}}
       volumes:
-      {{- range .Values.prometheusRuleConfigMapMounts }}
+      {{- range .Values.ruleConfigMapMounts }}
       - name: {{ .name }}
         configMap:
           name: {{ .configMap }}
@@ -269,7 +269,7 @@ spec:
       - emptyDir: {}
         name: data
       - configMap:
-          name: linkerd-prometheus-config
+          name: {{.Values.name}}-config
         name: prometheus-config
       {{ if .Values.global.controlPlaneTracing -}}
       - {{- include "partials.proxy.volumes.labels" . | indent 8 | trimPrefix (repeat 7 " ") }}
diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml
new file mode 100644
index 0000000000000..db246ce2a9d47
--- /dev/null
+++ b/charts/add-ons/prometheus/values.yaml
@@ -0,0 +1,68 @@
+# Default values for prometheus.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+ +replicaCount: 1 + +image: + repository: nginx + tag: stable + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/charts/linkerd2/templates/linkerd-config-addons.yaml b/charts/linkerd2/templates/linkerd-config-addons.yaml index 287e5c3a8e036..a94cdefcf1400 100644 --- a/charts/linkerd2/templates/linkerd-config-addons.yaml +++ b/charts/linkerd2/templates/linkerd-config-addons.yaml @@ -20,5 +20,7 @@ metadata: {{.Values.global.createdByAnnotation}}: {{default (printf "linkerd/helm %s" .Values.global.linkerdVersion) .Values.global.cliVersion}} data: values: |- + prometheus: + {{- include "linkerd.addons.sanitize-config" .Values.prometheus}} tracing: {{- include "linkerd.addons.sanitize-config" .Values.tracing}} diff --git a/charts/linkerd2/values-ha.yaml b/charts/linkerd2/values-ha.yaml index de5e57c787f9c..c8d2b1f5aa514 100644 --- a/charts/linkerd2/values-ha.yaml +++ b/charts/linkerd2/values-ha.yaml @@ -45,13 +45,14 @@ grafanaResources: heartbeatResources: *controller_resources # prometheus configuration -prometheusResources: - cpu: - limit: "4" - request: 300m - memory: - limit: 8192Mi - request: 300Mi +prometheus: + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi # proxy injector configuration proxyInjectorResources: *controller_resources diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 92cc7ffcef041..5279d7be18693 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -130,24 +130,6 @@ grafanaImage: gcr.io/linkerd-io/grafana disableHeartBeat: false heartbeatSchedule: "0 0 * * *" -# prometheus configuration -prometheusImage: prom/prometheus:v2.15.2 -prometheusLogLevel: *controller_log_level -prometheusExtraArgs: {} - # log.format: json -prometheusAlertmanagers: [] - # - scheme: http - # static_configs: - # - targets: - # - "alertmanager.linkerd.svc:9093" -prometheusRuleConfigMapMounts: [] - # - name: alerting-rules - # subPath: alerting_rules.yml - # configMap: linkerd-prometheus-rules - # - name: recording-rules - # subPath: recording_rules.yml - # configMap: linkerd-prometheus-rules - # proxy injector configuration proxyInjector: # if empty, Helm will auto-generate these fields @@ -193,6 +175,27 @@ smiMetrics: keyPEM: | # Configuration for 
Add-ons + +prometheus: + enabled: true + name: linkerd-prometheus + image: prom/prometheus:v2.15.2 + prometheusLogLevel: *controller_log_level + # args: + # - log.format: json + # alertManagers: + # - scheme: http + # static_configs: + # - targets: + # - "alertmanager.linkerd.svc:9093" + # ruleConfigMapMounts: + # - name: alerting-rules + # subPath: alerting_rules.yml + # configMap: linkerd-prometheus-rules + # - name: recording-rules + # subPath: recording_rules.yml + # configMap: linkerd-prometheus-rules + tracing: enabled: false collector: diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 0f06de1645099..7a60867809f5c 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -185,7 +185,7 @@ func newInstallOptionsWithDefaults() (*installOptions, error) { controlPlaneVersion: version.Version, controllerReplicas: defaults.ControllerReplicas, controllerLogLevel: defaults.ControllerLogLevel, - prometheusImage: defaults.PrometheusImage, + prometheusImage: defaults.Prometheus["image"].(string), highAvailability: defaults.Global.HighAvailability, controllerUID: defaults.ControllerUID, disableH2Upgrade: !defaults.EnableH2Upgrade, @@ -767,7 +767,7 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Global.ImagePullPolicy = options.imagePullPolicy installValues.GrafanaImage = fmt.Sprintf("%s/grafana", options.dockerRegistry) if options.prometheusImage != "" { - installValues.PrometheusImage = options.prometheusImage + installValues.Prometheus["image"] = options.prometheusImage } installValues.Global.Namespace = controlPlaneNamespace installValues.Global.CNIEnabled = options.cniEnabled diff --git a/cli/cmd/install_addon_test.go b/cli/cmd/install_addon_test.go index 59f6764c20d68..9141b5c8863e4 100644 --- a/cli/cmd/install_addon_test.go +++ b/cli/cmd/install_addon_test.go @@ -43,7 +43,7 @@ func TestMergeRaw(t *testing.T) { t.Run("Test Ovewriting of Values struct", func(*testing.T) { initialValues := charts.Values{ - PrometheusImage: "initial-prometheus", + WebImage: "initial-web", EnableH2Upgrade: true, ControllerReplicas: 1, OmitWebhookSideEffects: false, @@ -55,14 +55,14 @@ func TestMergeRaw(t *testing.T) { // partially by using omitempty, but then we don't have relevant checks in helm templates as they would // be nil when omitempty is present. 
		rawOverwriteValues := `
-prometheusImage: override-prometheus
+webImage: override-web
 enableH2Upgrade: false
 controllerReplicas: 2
 omitWebhookSideEffects: true
 enablePodAntiAffinity: true`
 
 		expectedValues := charts.Values{
-			PrometheusImage:        "override-prometheus",
+			WebImage:               "override-web",
 			EnableH2Upgrade:        false,
 			ControllerReplicas:     2,
 			OmitWebhookSideEffects: true,
diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go
index add62eb54d49a..71ec3ead60408 100644
--- a/cli/cmd/install_test.go
+++ b/cli/cmd/install_test.go
@@ -56,7 +56,6 @@ func TestRender(t *testing.T) {
 		ControllerImage:        "ControllerImage",
 		ControllerImageVersion: "ControllerImageVersion",
 		WebImage:               "WebImage",
-		PrometheusImage:        "PrometheusImage",
 		GrafanaImage:           "GrafanaImage",
 		ControllerLogLevel:     "ControllerLogLevel",
 		PrometheusLogLevel:     "PrometheusLogLevel",
@@ -68,6 +67,9 @@ func TestRender(t *testing.T) {
 		InstallNamespace: true,
 		Identity:         defaultValues.Identity,
 		NodeSelector:     defaultValues.NodeSelector,
+		Prometheus: charts.Prometheus{
+			"image": "PrometheusImage",
+		},
 		Global: &charts.Global{
 			Namespace:     "Namespace",
 			ClusterDomain: "cluster.local",
diff --git a/pkg/charts/linkerd2/addons.go b/pkg/charts/linkerd2/addons.go
index ecc0d78a1c10d..4b575ef57daa3 100644
--- a/pkg/charts/linkerd2/addons.go
+++ b/pkg/charts/linkerd2/addons.go
@@ -27,5 +27,13 @@ func ParseAddOnValues(values *Values) ([]AddOn, error) {
 		}
 	}
 
+	if values.Prometheus != nil {
+		if enabled, ok := values.Prometheus["enabled"].(bool); !ok {
+			return nil, fmt.Errorf("invalid value for 'prometheus.enabled' (should be boolean): %s", values.Prometheus["enabled"])
+		} else if enabled {
+			addOns = append(addOns, values.Prometheus)
+		}
+	}
+
 	return addOns, nil
 }
diff --git a/pkg/charts/linkerd2/prometheus.go b/pkg/charts/linkerd2/prometheus.go
new file mode 100644
index 0000000000000..f6360daf7e95f
--- /dev/null
+++ b/pkg/charts/linkerd2/prometheus.go
@@ -0,0 +1,41 @@
+package linkerd2
+
+import (
+	"k8s.io/helm/pkg/chartutil"
+	"sigs.k8s.io/yaml"
+)
+
+var (
+	prometheusAddOn = "prometheus"
+)
+
+// Prometheus is an add-on that installs the prometheus component
+type Prometheus map[string]interface{}
+
+// Name returns the name of the Prometheus add-on
+func (p Prometheus) Name() string {
+	return prometheusAddOn
+}
+
+// Values returns the configuration values that were assigned for this add-on
+func (p Prometheus) Values() []byte {
+	values, err := yaml.Marshal(p)
+	if err != nil {
+		return nil
+	}
+	return values
+}
+
+// ConfigStageTemplates returns the template files that are part of the config stage
+func (p Prometheus) ConfigStageTemplates() []*chartutil.BufferedFile {
+	return []*chartutil.BufferedFile{
+		{Name: "templates/prometheus-rbac.yaml"},
+	}
+}
+
+// ControlPlaneStageTemplates returns the template files that are part of the Control Plane Stage.
+func (p Prometheus) ControlPlaneStageTemplates() []*chartutil.BufferedFile { + return []*chartutil.BufferedFile{ + {Name: "templates/prometheus.yaml"}, + } +} diff --git a/pkg/charts/linkerd2/values.go b/pkg/charts/linkerd2/values.go index 7ef3ed52500c1..17ee907d0c8bb 100644 --- a/pkg/charts/linkerd2/values.go +++ b/pkg/charts/linkerd2/values.go @@ -19,37 +19,32 @@ const ( type ( // Values contains the top-level elements in the Helm charts Values struct { - Stage string `json:"stage"` - ControllerImage string `json:"controllerImage"` - ControllerImageVersion string `json:"controllerImageVersion"` - WebImage string `json:"webImage"` - PrometheusImage string `json:"prometheusImage"` - GrafanaImage string `json:"grafanaImage"` - ControllerReplicas uint `json:"controllerReplicas"` - ControllerLogLevel string `json:"controllerLogLevel"` - PrometheusLogLevel string `json:"prometheusLogLevel"` - PrometheusExtraArgs map[string]string `json:"prometheusExtraArgs"` - PrometheusAlertmanagers []interface{} `json:"prometheusAlertmanagers"` - PrometheusRuleConfigMapMounts []PrometheusRuleConfigMapMount `json:"prometheusRuleConfigMapMounts"` - ControllerUID int64 `json:"controllerUID"` - EnableH2Upgrade bool `json:"enableH2Upgrade"` - EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` - WebhookFailurePolicy string `json:"webhookFailurePolicy"` - OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` - RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` - DisableHeartBeat bool `json:"disableHeartBeat"` - HeartbeatSchedule string `json:"heartbeatSchedule"` - InstallNamespace bool `json:"installNamespace"` - Configs ConfigJSONs `json:"configs"` - Global *Global `json:"global"` - Identity *Identity `json:"identity"` - Dashboard *Dashboard `json:"dashboard"` - DebugContainer *DebugContainer `json:"debugContainer"` - ProxyInjector *ProxyInjector `json:"proxyInjector"` - ProfileValidator *ProfileValidator `json:"profileValidator"` - Tap *Tap `json:"tap"` - NodeSelector map[string]string `json:"nodeSelector"` - SMIMetrics *SMIMetrics `json:"smiMetrics"` + Stage string `json:"stage"` + ControllerImage string `json:"controllerImage"` + ControllerImageVersion string `json:"controllerImageVersion"` + WebImage string `json:"webImage"` + GrafanaImage string `json:"grafanaImage"` + ControllerReplicas uint `json:"controllerReplicas"` + ControllerLogLevel string `json:"controllerLogLevel"` + ControllerUID int64 `json:"controllerUID"` + EnableH2Upgrade bool `json:"enableH2Upgrade"` + EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` + WebhookFailurePolicy string `json:"webhookFailurePolicy"` + OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` + RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` + DisableHeartBeat bool `json:"disableHeartBeat"` + HeartbeatSchedule string `json:"heartbeatSchedule"` + InstallNamespace bool `json:"installNamespace"` + Configs ConfigJSONs `json:"configs"` + Global *Global `json:"global"` + Identity *Identity `json:"identity"` + Dashboard *Dashboard `json:"dashboard"` + DebugContainer *DebugContainer `json:"debugContainer"` + ProxyInjector *ProxyInjector `json:"proxyInjector"` + ProfileValidator *ProfileValidator `json:"profileValidator"` + Tap *Tap `json:"tap"` + NodeSelector map[string]string `json:"nodeSelector"` + SMIMetrics *SMIMetrics `json:"smiMetrics"` DestinationResources *Resources `json:"destinationResources"` GrafanaResources *Resources `json:"grafanaResources"` @@ -63,7 +58,8 @@ type ( WebResources *Resources 
`json:"webResources"` // Addon Structures - Tracing Tracing `json:"tracing"` + Prometheus Prometheus `json:"prometheus"` + Tracing Tracing `json:"tracing"` } // Global values common across all charts diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index 0c65178e8e4de..e129f42c97a0a 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -14,27 +14,27 @@ func TestNewValues(t *testing.T) { testVersion := "linkerd-dev" expected := &Values{ - Stage: "", - ControllerImage: "gcr.io/linkerd-io/controller", - ControllerImageVersion: testVersion, - WebImage: "gcr.io/linkerd-io/web", - PrometheusImage: "prom/prometheus:v2.15.2", - GrafanaImage: "gcr.io/linkerd-io/grafana", - ControllerReplicas: 1, - ControllerLogLevel: "info", - PrometheusLogLevel: "info", - PrometheusExtraArgs: map[string]string{}, - PrometheusAlertmanagers: []interface{}{}, - PrometheusRuleConfigMapMounts: []PrometheusRuleConfigMapMount{}, - ControllerUID: 2103, - EnableH2Upgrade: true, - EnablePodAntiAffinity: false, - WebhookFailurePolicy: "Ignore", - OmitWebhookSideEffects: false, - RestrictDashboardPrivileges: false, - DisableHeartBeat: false, - HeartbeatSchedule: "0 0 * * *", - InstallNamespace: true, + Stage: "", + ControllerImage: "gcr.io/linkerd-io/controller", + ControllerImageVersion: testVersion, + WebImage: "gcr.io/linkerd-io/web", + GrafanaImage: "gcr.io/linkerd-io/grafana", + ControllerReplicas: 1, + ControllerLogLevel: "info", + ControllerUID: 2103, + EnableH2Upgrade: true, + EnablePodAntiAffinity: false, + WebhookFailurePolicy: "Ignore", + OmitWebhookSideEffects: false, + RestrictDashboardPrivileges: false, + DisableHeartBeat: false, + HeartbeatSchedule: "0 0 * * *", + InstallNamespace: true, + Prometheus: Prometheus{ + "enabled": true, + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", + }, Global: &Global{ Namespace: "linkerd", ClusterDomain: "cluster.local", @@ -152,11 +152,6 @@ func TestNewValues(t *testing.T) { t.Run("HA", func(t *testing.T) { actual, err := NewValues(true) - // workaround for mergo, which resets these to []interface{}(nil) - // and []PrometheusRuleConfigMapMount(nil) - actual.PrometheusAlertmanagers = []interface{}{} - actual.PrometheusRuleConfigMapMounts = []PrometheusRuleConfigMapMount{} - if err != nil { t.Fatalf("Unexpected error: %v\n", err) } @@ -205,14 +200,19 @@ func TestNewValues(t *testing.T) { }, } - expected.PrometheusResources = &Resources{ - CPU: Constraints{ - Limit: "4", - Request: "300m", - }, - Memory: Constraints{ - Limit: "8192Mi", - Request: "300Mi", + expected.Prometheus = Prometheus{ + "enabled": true, + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", + "resources": map[string]interface{}{ + "cpu": map[string]interface{}{ + "limit": "4", + "request": "300m", + }, + "memory": map[string]interface{}{ + "limit": "8192Mi", + "request": "300Mi", + }, }, } From 0d675afc45b6940375395b003082e99d5f9f88b5 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 6 May 2020 16:10:50 +0530 Subject: [PATCH 02/42] remove prom flags and fix fiields Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 2 +- charts/linkerd2/values.yaml | 2 +- cli/cmd/install.go | 4 +- cli/cmd/install_test.go | 5 +- cli/cmd/testdata/install_config.golden | 82 +- cli/cmd/testdata/install_control-plane.golden | 829 ++++++------ ...install_controlplane_tracing_output.golden | 913 ++++++------- .../testdata/install_custom_registry.golden | 911 ++++++------- 
cli/cmd/testdata/install_default.golden | 911 ++++++------- cli/cmd/testdata/install_ha_output.golden | 988 +++++++------- .../install_ha_with_overrides_output.golden | 988 +++++++------- .../install_heartbeat_disabled_output.golden | 911 ++++++------- cli/cmd/testdata/install_helm_output.golden | 931 ++++++------- .../install_helm_output_addons.golden | 1155 +++++++++-------- .../testdata/install_helm_output_ha.golden | 1028 +++++++-------- .../testdata/install_no_init_container.golden | 889 ++++++------- cli/cmd/testdata/install_proxy_ignores.golden | 911 ++++++------- .../install_restricted_dashboard.golden | 911 ++++++------- cli/cmd/testdata/upgrade_default.golden | 911 ++++++------- .../testdata/upgrade_external_issuer.golden | 911 ++++++------- cli/cmd/testdata/upgrade_ha.golden | 988 +++++++------- cli/cmd/testdata/upgrade_ha_config.golden | 82 +- .../testdata/upgrade_overwrite_issuer.golden | 911 ++++++------- ...write_trust_anchors-external-issuer.golden | 911 ++++++------- .../upgrade_overwrite_trust_anchors.golden | 911 ++++++------- pkg/charts/linkerd2/prometheus.go | 2 +- pkg/charts/linkerd2/values_test.go | 14 +- 27 files changed, 9034 insertions(+), 8978 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 289b9bbd8d0d2..06f8531446b74 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -214,7 +214,7 @@ spec: - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - - --log.level={{lower .Values.prometheus.logLevel}} + - --log.level={{lower .Values.logLevel}} {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 5279d7be18693..a9b6f16f08ba5 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -180,7 +180,7 @@ prometheus: enabled: true name: linkerd-prometheus image: prom/prometheus:v2.15.2 - prometheusLogLevel: *controller_log_level + logLevel: *controller_log_level # args: # - log.format: json # alertManagers: diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 7a60867809f5c..643e93559204a 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -129,7 +129,6 @@ var ( "templates/web-rbac.yaml", "templates/serviceprofile-crd.yaml", "templates/trafficsplit-crd.yaml", - "templates/prometheus-rbac.yaml", "templates/grafana-rbac.yaml", "templates/proxy-injector-rbac.yaml", "templates/sp-validator-rbac.yaml", @@ -147,7 +146,6 @@ var ( "templates/destination.yaml", "templates/heartbeat.yaml", "templates/web.yaml", - "templates/prometheus.yaml", "templates/grafana.yaml", "templates/proxy-injector.yaml", "templates/sp-validator.yaml", @@ -772,7 +770,7 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Global.Namespace = controlPlaneNamespace installValues.Global.CNIEnabled = options.cniEnabled installValues.OmitWebhookSideEffects = options.omitWebhookSideEffects - installValues.PrometheusLogLevel = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) + installValues.Prometheus["logLevel"] = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) installValues.HeartbeatSchedule = options.heartbeatSchedule() installValues.RestrictDashboardPrivileges = options.restrictDashboardPrivileges installValues.DisableHeartBeat = options.disableHeartbeat diff --git 
a/cli/cmd/install_test.go b/cli/cmd/install_test.go index 71ec3ead60408..37d52981953e4 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -58,7 +58,6 @@ func TestRender(t *testing.T) { WebImage: "WebImage", GrafanaImage: "GrafanaImage", ControllerLogLevel: "ControllerLogLevel", - PrometheusLogLevel: "PrometheusLogLevel", ControllerUID: 2103, EnableH2Upgrade: true, WebhookFailurePolicy: "WebhookFailurePolicy", @@ -362,8 +361,8 @@ func TestValidate(t *testing.T) { t.Fatalf("Unexpected error occurred %s", err) } - if actual.PrometheusLogLevel != expected { - t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.PrometheusLogLevel) + if actual.Prometheus["logLevel"] != expected { + t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.Prometheus["logLevel"]) } }) diff --git a/cli/cmd/testdata/install_config.golden b/cli/cmd/testdata/install_config.golden index 1b4815643e7bf..b7a1bb4d99f62 100644 --- a/cli/cmd/testdata/install_config.golden +++ b/cli/cmd/testdata/install_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -838,3 +797,44 @@ subjects: name: linkerd-web namespace: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index d77eb8fbb4573..d7db80a8807ae 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -960,171 +960,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: 
linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere 
with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1132,20 +1048,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1153,45 +1069,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: 
GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1222,8 +1132,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1318,120 +1226,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- -apiVersion: apps/v1 -kind: Deployment +apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd 
app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1439,39 +1273,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1596,46 +1433,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: 
apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -1643,40 +1512,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -1803,120 +1670,108 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd 
app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator - template: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2040,61 +1895,229 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + 
tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - 
linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2102,46 +2125,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2242,8 +2265,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -2265,33 +2286,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - 
---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index f0d76a6884b20..dae781f8610aa 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1860,171 +1819,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: 
'(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2032,20 +1907,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2053,45 +1928,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2124,8 +1993,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2226,13 +2093,20 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: 
grafana-config - downwardAPI: items: - fieldRef: @@ -2244,108 +2118,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2353,39 +2146,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 
securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2518,20 +2314,14 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - downwardAPI: items: - fieldRef: @@ -2542,28 +2332,66 @@ spec: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2571,40 +2399,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 
httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2739,14 +2565,11 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - downwardAPI: items: - fieldRef: @@ -2757,66 +2580,52 @@ spec: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2824,41 +2633,47 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info + - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: 
failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2990,11 +2805,11 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - downwardAPI: items: - fieldRef: @@ -3004,53 +2819,262 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: 
__metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + 
targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -3058,47 +3082,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3207,8 +3230,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3230,11 +3251,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - downwardAPI: items: - fieldRef: @@ -3244,25 +3267,3 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git 
a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index de35e16e0426a..ea36851ba6d91 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: 
http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: my.custom.registry/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: 
- grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: my.custom.registry/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - 
serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: 
config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2880,61 +2694,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: 
linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: 
[__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus 
linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2942,46 +2965,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3082,8 +3105,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: my.custom.registry/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3105,33 +3126,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index d6df5184837cb..af389edbce57e 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + 
orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + 
linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: 
/var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: 
linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2880,61 +2694,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: 
linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: 
[__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2942,46 +2965,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - 
--config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3082,8 +3105,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3105,33 +3126,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index badb22a3ed425..1d9089e86fd94 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1928,171 +1887,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # 
k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2100,20 +1975,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2121,52 +1996,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version 
imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 resources: limits: - cpu: "4" - memory: "8192Mi" + cpu: "1" + memory: "1024Mi" requests: - cpu: "300m" - memory: "300Mi" + cpu: "100m" + memory: "50Mi" securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2197,8 +2066,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2299,120 +2166,49 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector 
app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2420,46 +2216,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2590,46 +2409,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + 
linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2640,10 +2491,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2656,7 +2507,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2665,30 +2516,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2699,8 +2550,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2833,78 +2682,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2915,10 +2747,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2931,7 +2763,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2940,30 +2772,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2977,6 +2812,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3106,64 +2943,277 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: 
- - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: 
linkerd-prometheus namespace: linkerd spec: - replicas: 3 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -3171,73 +3221,53 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "250Mi" + cpu: "4" + memory: "8192Mi" requests: - cpu: "100m" - memory: "50Mi" + cpu: "300m" + memory: "300Mi" securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3344,8 +3374,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3367,33 +3395,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - 
linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index ff5b209e05af0..0ab1fca014a2e 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1928,171 +1887,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 
'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2100,20 +1975,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2121,52 +1996,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 resources: limits: - cpu: "4" - memory: "8192Mi" + cpu: "1" + memory: "1024Mi" requests: - cpu: "300m" - memory: "300Mi" + cpu: "100m" + memory: "50Mi" securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2197,8 +2066,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2299,120 +2166,49 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### 
-### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2420,46 +2216,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version 
imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2590,46 +2409,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2640,10 +2491,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: 
beta.kubernetes.io/os: linux @@ -2656,7 +2507,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2665,30 +2516,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2699,8 +2550,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2833,78 +2682,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2915,10 +2747,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2931,7 +2763,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2940,30 +2772,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2977,6 +2812,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3106,64 +2943,277 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + 
labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: - replicas: 2 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -3171,73 +3221,53 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: 
prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "250Mi" + cpu: "4" + memory: "8192Mi" requests: - cpu: "100m" - memory: "50Mi" + cpu: "300m" + memory: "300Mi" securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3344,8 +3374,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3367,33 +3395,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index c52b92c2ff1e5..148a65b253275 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -336,47 +336,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1711,171 +1670,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: 
linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: 
replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1883,20 +1758,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1904,45 +1779,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: 
/api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1973,8 +1842,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2069,120 +1936,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: 
linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2190,39 +1983,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2347,46 +2143,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd 
app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2394,40 +2222,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2554,78 +2380,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: 
linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2633,41 +2442,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2791,61 +2605,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # 
__meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2853,46 +2876,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: 
/var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2993,8 +3016,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3016,33 +3037,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index e46bcbdcbb57a..277850dafe097 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/grafana-rbac.yaml --- ### @@ -1863,174 +1820,90 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/grafana.yaml --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: 
['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: 
labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2038,20 +1911,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2059,45 +1932,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: 
LINKERD2_PROXY_LOG @@ -2128,8 +1995,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2215,162 +2080,92 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: 
grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2486,48 +2281,80 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + 
linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2536,40 +2363,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2687,80 +2512,63 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml +# Source: linkerd2/templates/tap.yaml --- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2769,41 +2577,46 
@@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2918,111 +2731,325 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls --- -# Source: linkerd2/templates/tap.yaml +# Source: linkerd2/templates/smi-metrics.yaml + +--- +# Source: linkerd2/templates/linkerd-config-addons.yaml --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + 
annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - 
action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: - linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: - beta.kubernetes.io/os: linux + null containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config 
- env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3115,7 +3142,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,222 - --outbound-ports-to-ignore - - 443,111 + - "111" image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version imagePullPolicy: IfNotPresent name: linkerd-init @@ -3137,37 +3164,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls ---- -# Source: linkerd2/templates/smi-metrics.yaml - ---- -# Source: linkerd2/templates/linkerd-config-addons.yaml ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index c45ea4ae0138e..58b25d7c8386c 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/grafana-rbac.yaml --- ### @@ -1863,174 +1820,90 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/grafana.yaml --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: 
['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop 
- regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2038,20 +1911,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2059,45 +1932,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: 
/etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2128,8 +1995,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2215,162 +2080,92 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" 
linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2486,48 +2281,80 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: 
linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2536,40 +2363,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2687,242 +2512,11 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd 
- linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - 
image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator - volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3169,6 +2763,11 @@ metadata: linkerd.io/created-by: linkerd/helm linkerd-version data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -3640,3 +3239,407 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: 
/api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: 
Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + spec: + nodeSelector: + null + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - "111" + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 3a3cf7fd64615..272675423562f 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. 
JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/grafana-rbac.yaml --- ### @@ -1991,174 +1948,90 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/prometheus.yaml +# Source: linkerd2/templates/grafana.yaml --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: 
Deployment @@ -2166,20 +2039,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2187,52 +2060,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 resources: limits: - cpu: "4" - memory: "8192Mi" + cpu: "1" + memory: "1024Mi" requests: - cpu: "300m" - memory: "300Mi" + cpu: "100m" + memory: "50Mi" securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2263,8 +2130,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2356,169 +2221,122 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + 
path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2640,48 +2458,80 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version + name: linkerd-sp-validator + namespace: linkerd labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2693,10 +2543,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator 
linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2709,7 +2559,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2718,30 +2568,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2752,8 +2602,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2877,80 +2725,63 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml +# Source: linkerd2/templates/tap.yaml --- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + 
linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2962,10 +2793,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2978,7 +2809,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2987,30 +2818,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -3024,6 +2858,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3144,141 +2980,339 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls --- -# Source: linkerd2/templates/tap.yaml +# Source: linkerd2/templates/smi-metrics.yaml + +--- +# Source: linkerd2/templates/linkerd-config-addons.yaml --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # 
__meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: - replicas: 3 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: - linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - 
matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname + null containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "250Mi" + cpu: "4" + memory: "8192Mi" requests: - cpu: "100m" - memory: "50Mi" + cpu: "300m" + memory: "300Mi" securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3377,7 +3411,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,222 - --outbound-ports-to-ignore - - 443,111 + - "111" image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version imagePullPolicy: IfNotPresent name: linkerd-init @@ -3399,37 +3433,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls ---- -# Source: linkerd2/templates/smi-metrics.yaml - ---- -# Source: linkerd2/templates/linkerd-config-addons.yaml ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 2645c4846b78b..e76216bfd4d3b 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: 
ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1665,171 +1624,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + 
editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1837,20 +1712,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1858,281 +1733,28 @@ spec: linkerd.io/identity-mode: default 
linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Grafana -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: 
proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: grafana - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana - ports: - - containerPort: 3000 - name: http + - containerPort: 3000 + name: http readinessProbe: httpGet: path: /api/health @@ -2833,5 +2455,386 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # 
__meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 723e5a6e092f2..d30aef0e828d9 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + 
orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + 
linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: 
/var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: 
linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2880,61 +2694,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: 
linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: 
[__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2942,46 +2965,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - 
--config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3083,7 +3106,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,22,8100-8102 - --outbound-ports-to-ignore - - 443,5432 + - "5432" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3105,33 +3128,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 9ecdb407b396b..39849976d3f20 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -312,47 +312,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1735,171 +1694,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # 
__meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1907,20 +1782,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1928,45 +1803,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: 
gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1997,8 +1866,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2093,120 +1960,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2214,39 +2007,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2371,46 +2167,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: 
linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2418,40 +2246,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2578,78 +2404,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: 
install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2657,41 +2466,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2815,61 +2629,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus 
+### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all 
labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2877,46 +2900,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data 
+ - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3017,8 +3040,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3040,33 +3061,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index c47eb6805063a..3291c0bcf4ed4 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1808,171 +1767,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + 
[auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: 
__meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1980,20 +1855,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2001,45 +1876,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2070,8 +1939,6 @@ spec: fieldPath: metadata.namespace - name: 
LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2168,120 +2035,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2289,39 +2082,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector 
spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2448,46 +2244,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2495,40 +2323,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator 
linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2657,78 +2483,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2736,41 +2545,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap 
spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2896,61 +2710,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 
'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: 
linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2958,46 +2981,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3100,8 +3123,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3123,33 +3144,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: 
linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 196311c6f2b4c..b541fc3a23a1f 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1794,171 +1753,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - 
source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: 
linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1966,20 +1841,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1987,45 +1862,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2056,8 +1925,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2154,120 +2021,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + 
name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2275,39 +2068,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 
2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2434,46 +2230,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2481,40 +2309,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - 
mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: 
config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2882,61 +2696,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric 
+ action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: 
UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2944,46 +2967,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3086,8 +3109,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3109,33 +3130,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 5411a50df9382..1b4addccc1f7d 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1936,171 +1895,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2108,20 +1983,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2129,52 +2004,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 resources: limits: - cpu: "4" - memory: "8192Mi" + cpu: "1" + memory: "1024Mi" requests: - cpu: "300m" - memory: "300Mi" + cpu: "100m" + memory: "50Mi" securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2205,8 +2074,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2309,120 +2176,49 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - 
[analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2430,46 +2226,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" 
securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2602,46 +2421,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2652,10 +2503,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2668,7 +2519,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2677,30 +2528,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2711,8 +2562,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2847,78 +2696,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2929,10 +2761,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2945,7 +2777,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 
requiredDuringSchedulingIgnoredDuringExecution: @@ -2954,30 +2786,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2991,6 +2826,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3122,64 +2959,277 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + 
kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop 
the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: - replicas: 3 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -3187,73 +3237,53 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "250Mi" + cpu: "4" + memory: "8192Mi" requests: - cpu: "100m" - memory: "50Mi" + cpu: "300m" + memory: "300Mi" securityContext: - runAsUser: 2103 + 
runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3362,8 +3392,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3385,33 +3413,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_ha_config.golden b/cli/cmd/testdata/upgrade_ha_config.golden index d57c04579b7d8..dc36dc124b33e 100644 --- a/cli/cmd/testdata/upgrade_ha_config.golden +++ b/cli/cmd/testdata/upgrade_ha_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -838,3 +797,44 @@ subjects: name: linkerd-web namespace: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 7dd5f67d27d2e..9b223cee38738 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - 
role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - 
linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: 
runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: 
linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2880,61 +2694,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - 
secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: 
keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap 
+ linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2942,46 +2965,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3082,8 +3105,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3105,33 +3126,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 9f3904493f3c1..752769d1eeccf 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: 
["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1786,171 +1745,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - 
action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1958,20 +1833,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + 
linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1979,45 +1854,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2048,8 +1917,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2144,120 +2011,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - 
providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2265,39 +2058,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2422,46 +2218,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2469,40 +2297,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2629,78 +2455,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile 
Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2708,41 +2517,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2866,61 +2680,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: 
+ name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2928,46 +2951,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3068,8 +3091,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3091,33 +3112,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 7dd5f67d27d2e..9b223cee38738 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: 
ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: 
replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - 
- --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector 
app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + 
targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,78 +2469,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + 
app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2722,41 +2531,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2880,61 +2694,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # 
deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2942,46 +2965,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + 
runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3082,8 +3105,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3105,33 +3126,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - tracing: - enabled: false diff --git a/pkg/charts/linkerd2/prometheus.go b/pkg/charts/linkerd2/prometheus.go index f6360daf7e95f..807aeadb17f38 100644 --- a/pkg/charts/linkerd2/prometheus.go +++ b/pkg/charts/linkerd2/prometheus.go @@ -6,7 +6,7 @@ import ( ) var ( - prometheusAddOn = "tracing" + prometheusAddOn = "prometheus" ) // Prometheus is an add-on that installs the prometheus component diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index e129f42c97a0a..64985cb2d83d8 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -31,9 +31,10 @@ func TestNewValues(t *testing.T) { HeartbeatSchedule: "0 0 * * *", InstallNamespace: true, Prometheus: Prometheus{ - "enabled": true, - "name": "linkerd-prometheus", - "image": "prom/prometheus:v2.15.2", + "enabled": true, + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", + "logLevel": "info", }, Global: &Global{ Namespace: "linkerd", @@ -201,9 +202,10 @@ func TestNewValues(t *testing.T) { } expected.Prometheus = Prometheus{ - "enabled": true, - "name": "linkerd-prometheus", - "image": "prom/prometheus:v2.15.2", + "enabled": true, + "logLevel": "info", + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", "resources": map[string]interface{}{ "cpu": map[string]interface{}{ "limit": "4", From 751d236f176c481640194e16f78bbb83e9786721 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 8 May 2020 12:57:37 +0530 Subject: [PATCH 03/42] prometheus charts linting Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/Chart.yaml | 2 +- charts/add-ons/prometheus/requirements.yaml | 1 - charts/add-ons/prometheus/values.yaml | 68 --------------------- 3 files changed, 1 insertion(+), 70 deletions(-) diff --git a/charts/add-ons/prometheus/Chart.yaml b/charts/add-ons/prometheus/Chart.yaml index ddcfc4531ffe9..220f27dacda88 100644 --- a/charts/add-ons/prometheus/Chart.yaml +++ b/charts/add-ons/prometheus/Chart.yaml @@ -6,4 +6,4 @@ version: 0.1.0 maintainers: - name: Linkerd authors email: cncf-linkerd-dev@lists.cncf.io - url: https://linkerd.io/ \ No newline at end of file + url: https://linkerd.io/ diff --git a/charts/add-ons/prometheus/requirements.yaml 
b/charts/add-ons/prometheus/requirements.yaml index 8266e94ae195b..b5d3df3884f5b 100644 --- a/charts/add-ons/prometheus/requirements.yaml +++ b/charts/add-ons/prometheus/requirements.yaml @@ -2,4 +2,3 @@ dependencies: - name: partials version: 0.1.0 repository: file://../../partials - \ No newline at end of file diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml index db246ce2a9d47..e69de29bb2d1d 100644 --- a/charts/add-ons/prometheus/values.yaml +++ b/charts/add-ons/prometheus/values.yaml @@ -1,68 +0,0 @@ -# Default values for prometheus. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: nginx - tag: stable - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 80 - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -nodeSelector: {} - -tolerations: [] - -affinity: {} From 04bbcae1bf57e53c8cc453e0af8c5124cc3c671f Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 8 May 2020 13:03:31 +0530 Subject: [PATCH 04/42] use common fix Signed-off-by: Tarun Pothulapati --- cli/cmd/install.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 643e93559204a..c60ef4f2d53b9 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -851,7 +851,7 @@ func render(w io.Writer, values *l5dcharts.Values) error { Name: addOn.Name(), Dir: filepath.Join(addOnChartsPath, addOn.Name()), Namespace: controlPlaneNamespace, - RawValues: append(rawValues, addOn.Values()...), + RawValues: append(addOn.Values(), rawValues...), Files: []*chartutil.BufferedFile{&chartutil.BufferedFile{ Name: chartutil.ChartfileName, }}, From 6947cd012167d716bf1f19bf992e3341c65edf14 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 8 May 2020 13:15:43 +0530 Subject: [PATCH 05/42] add prometheus sub-chart dep to linkerd2 Signed-off-by: Tarun Pothulapati --- charts/linkerd2/requirements.lock | 7 +++++-- charts/linkerd2/requirements.yaml | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/charts/linkerd2/requirements.lock b/charts/linkerd2/requirements.lock index e634f9b17f7f2..782ea925f94d3 100644 --- a/charts/linkerd2/requirements.lock +++ b/charts/linkerd2/requirements.lock @@ -1,9 +1,12 @@ dependencies: +- name: prometheus + repository: file://../add-ons/prometheus + version: 0.1.0 - name: partials repository: file://../partials version: 0.1.0 - name: tracing repository: file://../add-ons/tracing version: 0.1.0 -digest: sha256:e33d9505af13cd9a047d9a2d6094af4b4d62cdc9ee64970dc3d2bba6809066d0 -generated: "2020-01-30T00:02:25.011181273+05:30" +digest: sha256:b650dc0a30d65e4bbbff0c41e5b643fb96d0660cae658f185a1df5414602e306 +generated: "2020-05-08T13:09:25.498949694+05:30" diff --git a/charts/linkerd2/requirements.yaml b/charts/linkerd2/requirements.yaml index ced78a1d6676b..28fe240254142 100644 --- a/charts/linkerd2/requirements.yaml +++ b/charts/linkerd2/requirements.yaml @@ -1,4 +1,8 @@ dependencies: +- name: prometheus + version: 0.1.0 + repository: file://../add-ons/prometheus + condition: prometheus.enabled - name: partials version: 0.1.0 repository: file://../partials From 48cc20607f062c74d01e4eb4393ed6bfa1af8544 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 8 May 2020 17:53:00 +0530 Subject: [PATCH 06/42] update golden files Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_addon_config.golden | 82 +- .../install_addon_control-plane.golden | 839 ++++++------ cli/cmd/testdata/install_tracing.golden | 1165 ++++++++-------- cli/cmd/testdata/upgrade_add-on_config.golden | 82 +- .../upgrade_add-on_controlplane.golden | 843 ++++++------ .../testdata/upgrade_add-on_overwrite.golden | 1169 +++++++++-------- cli/cmd/testdata/upgrade_add_add-on.golden | 1169 +++++++++-------- 7 files changed, 2677 insertions(+), 2672 deletions(-) diff --git a/cli/cmd/testdata/install_addon_config.golden b/cli/cmd/testdata/install_addon_config.golden index ed99b0d431247..d963bac9c564e 100644 --- a/cli/cmd/testdata/install_addon_config.golden +++ b/cli/cmd/testdata/install_addon_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: 
linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -864,3 +823,44 @@ metadata: labels: linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 1957e5901c828..16cf082d151d7 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -960,376 +960,6 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - 
replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - 
annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: prometheus - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### ### Grafana ### --- @@ -2293,6 +1923,11 @@ metadata: linkerd.io/created-by: 
linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2405,21 +2040,224 @@ spec: value: "80" image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: / - port: 13133 - name: oc-collector + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector + ports: + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + 
httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-collector + volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Tracing Jaeger Service +### +--- +apiVersion: v1 +kind: Service +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: linkerd-jaeger + ports: + - name: collection + port: 14268 + - name: ui + port: 16686 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + name: linkerd-jaeger + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-jaeger + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-jaeger + spec: + containers: + - image: jaegertracing/all-in-one:1.17.1 + imagePullPolicy: IfNotPresent + name: jaeger ports: - - containerPort: 55678 - - containerPort: 9411 - readinessProbe: - httpGet: - path: / - port: 13133 - volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2543,41 +2381,179 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - 
name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus ### --- +kind: ConfigMap apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # 
__meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service +apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2585,44 +2561,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: .Values.name spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - image: jaegertracing/all-in-one:1.17.1 + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + 
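Note that the rendered Prometheus Deployment above carries the literal string .Values.name under linkerd.io/proxy-deployment, both in spec.selector.matchLabels and in the pod template labels. That reads like a templating slip in the new add-on chart rather than intended output: without Helm's action delimiters the value is never substituted, so the selector cannot line up with the linkerd-prometheus name used by the ServiceAccount and ConfigMap. A minimal sketch of the intended template lines, assuming the chart keeps the name key that is recorded as linkerd-prometheus in the add-on values:

    # sketch of the add-on Deployment template; .Values.name is the key
    # already recorded as "linkerd-prometheus" in linkerd-config-addons
    selector:
      matchLabels:
        linkerd.io/control-plane-component: prometheus
        linkerd.io/proxy-deployment: {{.Values.name}}

With that change the golden output would read linkerd.io/proxy-deployment: linkerd-prometheus, matching the Deployment's own name.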
securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2723,8 +2722,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -2746,9 +2743,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 42b2c00763c63..1a5c7fa163ddf 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1800,171 +1759,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: 
__tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1972,20 +1847,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1993,45 +1868,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2062,8 +1931,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2158,120 +2025,46 @@ spec: runAsNonRoot: false runAsUser: 0 
terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2279,39 +2072,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: 
IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2436,46 +2232,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2483,40 +2311,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: 
gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2643,260 +2469,23 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: 
linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: 
false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator - volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Tap -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-tap + name: linkerd-tap namespace: linkerd labels: linkerd.io/control-plane-component: tap @@ -3133,6 +2722,11 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -3618,3 +3212,410 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + 
action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + 
linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
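The same Deployment shows how the add-on values recorded in linkerd-config-addons surface in the rendered pod: image prom/prometheus:v2.15.2 becomes the container image, and logLevel info becomes the --log.level=info flag. A rough sketch of how the add-on chart presumably threads these keys through, assuming they arrive at the subchart's top level just as the name key does:

    # sketch; logLevel and image are the keys shown in the add-on values block
    containers:
    - args:
      - --storage.tsdb.path=/data
      - --storage.tsdb.retention.time=6h
      - --config.file=/etc/prometheus/prometheus.yml
      - --log.level={{.Values.logLevel}}
      image: {{.Values.image}}
      name: prometheus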
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add-on_config.golden b/cli/cmd/testdata/upgrade_add-on_config.golden index c5b23dfa4c283..02bde1405d2fa 100644 --- 
a/cli/cmd/testdata/upgrade_add-on_config.golden +++ b/cli/cmd/testdata/upgrade_add-on_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -864,3 +823,44 @@ metadata: labels: linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 351e767ffb981..d6dd9a81acebf 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -968,378 +968,6 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: 
prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: prometheus - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### ### Grafana ### --- @@ -2311,6 +1939,11 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2423,21 +2056,226 @@ spec: value: "80" image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: / - port: 13133 - name: oc-collector + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector + ports: + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: 
gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-collector + volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Tracing Jaeger Service +### +--- +apiVersion: v1 +kind: Service +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: linkerd-jaeger + ports: + - name: collection + port: 14268 + - name: ui + port: 16686 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + name: linkerd-jaeger + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-jaeger + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-jaeger + spec: + containers: + - image: jaegertracing/all-in-one:1.17.1 + imagePullPolicy: IfNotPresent + name: jaeger ports: - - containerPort: 55678 - - containerPort: 9411 - readinessProbe: - httpGet: - path: / - port: 13133 - volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2563,41 +2401,179 @@ spec: runAsNonRoot: false 
runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus ### --- +kind: ConfigMap apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => 
+ # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service +apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2605,44 +2581,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: .Values.name spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - image: jaegertracing/all-in-one:1.17.1 + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + 
name: prometheus ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2745,8 +2744,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -2768,9 +2765,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index db5abaf51c647..42d63314d27cd 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1808,171 +1767,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor 
- # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: 
__tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1980,20 +1855,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2001,45 +1876,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2070,8 +1939,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: 
LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2168,120 +2035,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2289,39 +2082,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: 
- - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2448,46 +2244,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2495,40 +2323,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - 
linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2657,262 +2483,23 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 - 
securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - 
--outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator - volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Tap -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-tap + name: linkerd-tap namespace: linkerd labels: linkerd.io/control-plane-component: tap @@ -3151,6 +2738,11 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: overwrite-collector-image @@ -3638,3 +3230,412 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - 
source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: 
linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden 
b/cli/cmd/testdata/upgrade_add_add-on.golden index fd1feac144609..671bed820baa6 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Grafana RBAC ### --- @@ -1808,171 +1767,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: 
Deployment @@ -1980,20 +1855,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2001,45 +1876,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2070,8 +1939,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2168,120 +2035,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = 
%(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -2289,39 +2082,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2448,46 +2244,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: 
{} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2495,40 +2323,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2657,262 +2483,23 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - 
emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator - volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Tap -### ---- -kind: Service -apiVersion: v1 
-metadata: - name: linkerd-tap + name: linkerd-tap namespace: linkerd labels: linkerd.io/control-plane-component: tap @@ -3151,6 +2738,11 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + prometheus: + enabled: true + image: prom/prometheus:v2.15.2 + logLevel: info + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -3640,3 +3232,412 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 
'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: .Values.name + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + 
linkerd.io/proxy-deployment: .Values.name + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: 
gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity From 79e15b32b0b2b7ab608676c30282dd3d239397b8 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 8 May 2020 19:28:46 +0530 Subject: [PATCH 07/42] update install metaValues test Signed-off-by: Tarun Pothulapati --- cli/cmd/install_test.go | 9 +- cli/cmd/testdata/install_output.golden | 911 +++++++++++++------------ 2 files changed, 462 insertions(+), 458 deletions(-) diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index 37d52981953e4..d2cc878f1ea90 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -66,9 +66,6 @@ func TestRender(t *testing.T) { InstallNamespace: true, Identity: defaultValues.Identity, NodeSelector: defaultValues.NodeSelector, - Prometheus: charts.Prometheus{ - "image": "PrometheusImage", - }, Global: &charts.Global{ Namespace: "Namespace", ClusterDomain: "cluster.local", @@ -132,6 +129,12 @@ func TestRender(t *testing.T) { Dashboard: &charts.Dashboard{ Replicas: 1, }, + Prometheus: charts.Prometheus{ + "enabled": true, + "image": "PrometheusImage", + "name": "linkerd-prometheus", + "logLevel": "info", + }, Tracing: map[string]interface{}{ "enabled": false, }, diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index ca1186befef65..5957234ff1262 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-Namespace-prometheus -subjects: -- kind: 
ServiceAccount - name: linkerd-prometheus - namespace: Namespace ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: Namespace - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace ---- -### ### Grafana RBAC ### --- @@ -1796,171 +1755,87 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Grafana ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-prometheus-config + name: linkerd-grafana-config namespace: Namespace labels: - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + grafana.ini: |- + instance_name = linkerd-grafana - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] + [auth] + disable_login_form = true - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['Namespace'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ + [auth.anonymous] + enabled = true + org_role = Editor - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + [auth.basic] + enabled = false - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop + [analytics] + check_for_updates = false - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['Namespace'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + [panels] + disable_sanitize_html = true - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.Namespace.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - 
source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;Namespace$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-prometheus + name: linkerd-grafana namespace: Namespace labels: - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ports: - - name: admin-http - port: 9090 - targetPort: 9090 + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -1968,20 +1843,20 @@ metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace - name: linkerd-prometheus + name: linkerd-grafana namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1989,45 +1864,39 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: prometheus + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-prometheus + 
linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=prometheusloglevel - image: PrometheusImage + - env: + - name: GF_PATHS_DATA + value: /data + image: GrafanaImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /-/healthy - port: 9090 + path: /api/health + port: 3000 initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + name: grafana ports: - - containerPort: 9090 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /api/health + port: 3000 securityContext: - runAsUser: 65534 + runAsUser: 472 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /etc/grafana + name: grafana-config readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2058,8 +1927,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2153,120 +2020,46 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: Namespace - labels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: Namespace - labels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -spec: - type: ClusterIP - selector: - ControllerComponentLabel: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment 
metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: grafana + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace - name: linkerd-grafana + name: linkerd-proxy-injector namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + ControllerComponentLabel: proxy-injector template: metadata: annotations: @@ -2274,39 +2067,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: grafana + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: GrafanaImage:ControllerImageVersion + - args: + - proxy-injector + - -log-level=ControllerLogLevel + image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2430,46 +2226,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: Namespace + labels: + ControllerComponentLabel: proxy-injector + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +spec: + type: ClusterIP + selector: + ControllerComponentLabel: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: Namespace + labels: + ControllerComponentLabel: sp-validator + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +spec: + type: ClusterIP + selector: + ControllerComponentLabel: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: 
apps/v1 kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator template: metadata: annotations: @@ -2477,40 +2305,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2636,78 +2462,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: Namespace labels: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: tap ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: Namespace - labels: - ControllerComponentLabel: sp-validator - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -spec: - type: ClusterIP - selector: - ControllerComponentLabel: sp-validator - ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace - name: linkerd-sp-validator 
+ name: linkerd-tap namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap + ControllerNamespaceLabel: Namespace + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2715,41 +2524,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=Namespace - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2872,61 +2686,270 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: Namespace + labels: + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +data: + values: |- + prometheus: + enabled: true + image: PrometheusImage + logLevel: info + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: Namespace-linkerd-prometheus + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: Namespace-linkerd-prometheus + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: Namespace-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: Namespace +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: Namespace + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace +--- +### +### Prometheus ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: Namespace + labels: + ControllerComponentLabel: prometheus + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +data: + prometheus.yml: |- + global: + 
scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['Namespace'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['Namespace'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;Namespace$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp 
labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: Namespace labels: - ControllerComponentLabel: tap + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: tap + ControllerComponentLabel: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: tap + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace - name: linkerd-tap + name: linkerd-prometheus namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: tap + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name template: metadata: annotations: @@ -2934,46 +2957,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: tap + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: .Values.name spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=Namespace - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: PrometheusImage imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3073,8 +3096,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: ProxyInitImageName:ProxyInitVersion imagePullPolicy: ImagePullPolicy name: linkerd-init @@ -3096,33 +3117,13 @@ spec: 
runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: Namespace - labels: - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - values: |- - tracing: - enabled: false From be3d3eaa9d1d75b950e94c5755424ddd5f430c97 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Sun, 10 May 2020 21:02:07 +0530 Subject: [PATCH 08/42] fix template correctly Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/templates/prometheus.yaml | 2 +- cli/cmd/testdata/install_addon_control-plane.golden | 8 ++++++-- cli/cmd/testdata/install_control-plane.golden | 8 ++++++-- .../testdata/install_controlplane_tracing_output.golden | 8 ++++++-- cli/cmd/testdata/install_custom_registry.golden | 8 ++++++-- cli/cmd/testdata/install_default.golden | 8 ++++++-- cli/cmd/testdata/install_ha_output.golden | 8 ++++++-- cli/cmd/testdata/install_ha_with_overrides_output.golden | 8 ++++++-- cli/cmd/testdata/install_heartbeat_disabled_output.golden | 8 ++++++-- cli/cmd/testdata/install_helm_output.golden | 8 +++++--- cli/cmd/testdata/install_helm_output_addons.golden | 8 +++++--- cli/cmd/testdata/install_helm_output_ha.golden | 8 +++++--- cli/cmd/testdata/install_no_init_container.golden | 6 ++++-- cli/cmd/testdata/install_output.golden | 8 ++++++-- cli/cmd/testdata/install_proxy_ignores.golden | 8 +++++--- cli/cmd/testdata/install_restricted_dashboard.golden | 8 ++++++-- cli/cmd/testdata/install_tracing.golden | 8 ++++++-- cli/cmd/testdata/upgrade_add-on_controlplane.golden | 8 ++++++-- cli/cmd/testdata/upgrade_add-on_overwrite.golden | 8 ++++++-- cli/cmd/testdata/upgrade_add_add-on.golden | 8 ++++++-- cli/cmd/testdata/upgrade_default.golden | 8 ++++++-- cli/cmd/testdata/upgrade_external_issuer.golden | 8 ++++++-- cli/cmd/testdata/upgrade_ha.golden | 8 ++++++-- cli/cmd/testdata/upgrade_overwrite_issuer.golden | 8 ++++++-- ...upgrade_overwrite_trust_anchors-external-issuer.golden | 8 ++++++-- cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden | 8 ++++++-- 26 files changed, 145 insertions(+), 55 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 06f8531446b74..8f2b37fe51431 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -175,7 +175,7 @@ spec: {{ $_ := set .Values.global.proxy.image "Version" .Values.global.linkerdVersion -}} {{ end -}} {{ $_ := set .Values.global.proxy "workloadKind" "deployment" -}} -{{ $_ := set .Values.global.proxy "component" ".Values.name" -}} +{{ $_ := set .Values.global.proxy "component" .Values.name -}} {{ include "linkerd.proxy.validation" .Values.global.proxy -}} apiVersion: apps/v1 kind: Deployment diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 919b6ec2be2d3..da245d96ec899 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -2577,7 
+2577,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2588,7 +2588,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2654,6 +2654,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2725,6 +2727,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index d7db80a8807ae..ea6c2c1c1639c 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -2117,7 +2117,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2128,7 +2128,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2194,6 +2194,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2265,6 +2267,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index dae781f8610aa..2538fc69880ca 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -3074,7 +3074,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3085,7 +3085,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3153,6 +3153,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3230,6 +3232,8 @@ 
spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index ea36851ba6d91..a399006ecdaf9 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2957,7 +2957,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2968,7 +2968,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3034,6 +3034,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3105,6 +3107,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: my.custom.registry/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index af389edbce57e..d7cf42e45bbb1 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2957,7 +2957,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2968,7 +2968,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3034,6 +3034,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3105,6 +3107,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 1d9089e86fd94..6eaf948f7bb3b 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -3213,7 +3213,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3224,7 +3224,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: 
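All of the golden-file churn in this patch — `.Values.name` turning into `linkerd-prometheus` in the proxy-deployment labels — falls out of the one-line template fix above, where the proxy component was previously set to the quoted literal ".Values.name" instead of the evaluated value. A minimal sketch of the difference, assuming the add-on values set name: linkerd-prometheus:

{{- /* Before: `set` stores the literal string ".Values.name"; the proxy
       partials then render it verbatim into the pod labels, e.g.
       linkerd.io/proxy-deployment: .Values.name */ -}}
{{ $_ := set .Values.global.proxy "component" ".Values.name" -}}

{{- /* After: `set` stores the evaluated value, so the same label renders as
       linkerd.io/proxy-deployment: linkerd-prometheus */ -}}
{{ $_ := set .Values.global.proxy "component" .Values.name -}}
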
nodeSelector: beta.kubernetes.io/os: linux @@ -3297,6 +3297,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3374,6 +3376,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 0ab1fca014a2e..e83eb9c446248 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -3213,7 +3213,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3224,7 +3224,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3297,6 +3297,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3374,6 +3376,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 148a65b253275..db4ebacae6d83 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2868,7 +2868,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2879,7 +2879,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2945,6 +2945,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3016,6 +3018,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 277850dafe097..e735aecaba502 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -3002,7 
+3002,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3013,7 +3013,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: null @@ -3079,6 +3079,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3142,7 +3144,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,222 - --outbound-ports-to-ignore - - "111" + - 443,111 image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 97923d41a37ed..670d48367fa87 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -3474,7 +3474,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3485,7 +3485,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: null @@ -3551,6 +3551,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3614,7 +3616,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,222 - --outbound-ports-to-ignore - - "111" + - 443,111 image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 272675423562f..d92dca85c7482 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -3258,7 +3258,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3269,7 +3269,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: null @@ -3342,6 +3342,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3411,7 +3413,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,222 - 
--outbound-ports-to-ignore - - "111" + - 443,111 image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index e76216bfd4d3b..81f8a6ef74ef5 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2690,7 +2690,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2701,7 +2701,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2767,6 +2767,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 5957234ff1262..5f17742d55b86 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2949,7 +2949,7 @@ spec: matchLabels: ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2960,7 +2960,7 @@ spec: ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3026,6 +3026,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3096,6 +3098,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: ProxyInitImageName:ProxyInitVersion imagePullPolicy: ImagePullPolicy name: linkerd-init diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index d30aef0e828d9..8c6c2dff0a2b2 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2957,7 +2957,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2968,7 +2968,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3034,6 +3034,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: 
LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3106,7 +3108,7 @@ spec: - --inbound-ports-to-ignore - 4190,4191,22,8100-8102 - --outbound-ports-to-ignore - - "5432" + - 443,5432 image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 39849976d3f20..e2765bec1b164 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2892,7 +2892,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2903,7 +2903,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2969,6 +2969,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3040,6 +3042,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index f36807b8895c7..de1cd08a71f4e 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -3443,7 +3443,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3454,7 +3454,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3520,6 +3520,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3591,6 +3593,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 763889354ba90..62eacf8c83619 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -2597,7 +2597,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2608,7 +2608,7 @@ spec: 
linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2674,6 +2674,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2747,6 +2749,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 007ca681c5d44..b4faecc8f65e8 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -3461,7 +3461,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3472,7 +3472,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3538,6 +3538,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3611,6 +3613,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 86bc595093fc1..600daef974b85 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -3463,7 +3463,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3474,7 +3474,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3540,6 +3540,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3613,6 +3615,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 3291c0bcf4ed4..e1ecaa3627440 100644 
--- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2973,7 +2973,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2984,7 +2984,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3050,6 +3050,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3123,6 +3125,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index b541fc3a23a1f..9f6cab9736a5d 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2959,7 +2959,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2970,7 +2970,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3036,6 +3036,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3109,6 +3111,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 1b4addccc1f7d..8d28a4040916c 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -3229,7 +3229,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3240,7 +3240,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3313,6 +3313,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: 
LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3392,6 +3394,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 9b223cee38738..9d6c43f55ce35 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2957,7 +2957,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2968,7 +2968,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3034,6 +3034,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3105,6 +3107,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 752769d1eeccf..8ab28e5d07b99 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2943,7 +2943,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2954,7 +2954,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3020,6 +3020,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3091,6 +3093,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 9b223cee38738..9d6c43f55ce35 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2957,7 +2957,7 @@ spec: matchLabels: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2968,7 
+2968,7 @@ spec: linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: .Values.name + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux @@ -3034,6 +3034,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3105,6 +3107,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init From ab854df2d161d9494d7ede2bc3a87c377f4625da Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Sun, 10 May 2020 21:42:06 +0530 Subject: [PATCH 09/42] remove linkerd-prometheus healthchecks Signed-off-by: Tarun Pothulapati --- pkg/healthcheck/healthcheck.go | 2 - pkg/healthcheck/healthcheck_test.go | 139 +------------------ test/serviceaccounts/serviceaccounts_test.go | 15 +- 3 files changed, 10 insertions(+), 146 deletions(-) diff --git a/pkg/healthcheck/healthcheck.go b/pkg/healthcheck/healthcheck.go index 1caee99c34f20..e91fee7815184 100644 --- a/pkg/healthcheck/healthcheck.go +++ b/pkg/healthcheck/healthcheck.go @@ -192,7 +192,6 @@ var ExpectedServiceAccountNames = []string{ "linkerd-destination", "linkerd-grafana", "linkerd-identity", - "linkerd-prometheus", "linkerd-proxy-injector", "linkerd-sp-validator", "linkerd-web", @@ -1764,7 +1763,6 @@ func (hc *HealthChecker) expectedRBACNames() []string { return []string{ fmt.Sprintf("linkerd-%s-controller", hc.ControlPlaneNamespace), fmt.Sprintf("linkerd-%s-identity", hc.ControlPlaneNamespace), - fmt.Sprintf("linkerd-%s-prometheus", hc.ControlPlaneNamespace), fmt.Sprintf("linkerd-%s-proxy-injector", hc.ControlPlaneNamespace), fmt.Sprintf("linkerd-%s-sp-validator", hc.ControlPlaneNamespace), fmt.Sprintf("linkerd-%s-tap", hc.ControlPlaneNamespace), diff --git a/pkg/healthcheck/healthcheck_test.go b/pkg/healthcheck/healthcheck_test.go index a19d9141bd0bd..ee5db97b2c928 100644 --- a/pkg/healthcheck/healthcheck_test.go +++ b/pkg/healthcheck/healthcheck_test.go @@ -423,7 +423,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: extension-apiserver-authentication + name: extension-apiserver-authentication namespace: kube-system data: %s : 'bar' @@ -570,7 +570,7 @@ metadata: }, []string{ "linkerd-config control plane Namespace exists", - "linkerd-config control plane ClusterRoles exist: missing ClusterRoles: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-prometheus, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", + "linkerd-config control plane ClusterRoles exist: missing ClusterRoles: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", }, }, { @@ -599,14 +599,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -632,7 +624,7 @@ metadata: []string{ "linkerd-config control plane Namespace exists", "linkerd-config control plane ClusterRoles exist", - "linkerd-config 
control plane ClusterRoleBindings exist: missing ClusterRoleBindings: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-prometheus, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", + "linkerd-config control plane ClusterRoleBindings exist: missing ClusterRoleBindings: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", }, }, { @@ -661,14 +653,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -709,14 +693,6 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -768,15 +744,6 @@ metadata: ` kind: ServiceAccount apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: test-ns - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ServiceAccount -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -872,14 +839,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -920,14 +879,6 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -979,15 +930,6 @@ metadata: ` kind: ServiceAccount apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: test-ns - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ServiceAccount -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1092,14 +1034,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1140,14 +1074,6 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1199,15 +1125,6 @@ metadata: ` kind: ServiceAccount apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: test-ns - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ServiceAccount -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1321,14 +1238,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1369,14 
+1278,6 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1428,15 +1329,6 @@ metadata: ` kind: ServiceAccount apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: test-ns - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ServiceAccount -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1559,14 +1451,6 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1607,14 +1491,6 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-test-ns-prometheus - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1666,15 +1542,6 @@ metadata: ` kind: ServiceAccount apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: test-ns - labels: - linkerd.io/control-plane-ns: test-ns -`, - ` -kind: ServiceAccount -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns diff --git a/test/serviceaccounts/serviceaccounts_test.go b/test/serviceaccounts/serviceaccounts_test.go index 730cafdd403bb..ea296c0845078 100644 --- a/test/serviceaccounts/serviceaccounts_test.go +++ b/test/serviceaccounts/serviceaccounts_test.go @@ -16,14 +16,13 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +// namesMatch checks if all the expectedServiceAccountNames are present in the given list, +// The passed argument list is allowed to contain extra members. 
func namesMatch(names []string) bool { - for _, name := range names { - if name == "default" || name == "linkerd-heartbeat" { - continue - } + for _, expectedname := range healthcheck.ExpectedServiceAccountNames { found := false - for _, expectedName := range healthcheck.ExpectedServiceAccountNames { - if name == expectedName { + for _, name := range names { + if expectedname == name { found = true break } @@ -53,7 +52,7 @@ func TestServiceAccountsMatch(t *testing.T) { saNames = append(saNames, strings.TrimPrefix(name, "serviceaccount/")) } // disregard `default` and `linkerd-heartbeat` - if len(saNames)-2 != len(expectedNames) || !namesMatch(saNames) { + if len(saNames) < len(expectedNames) || !namesMatch(saNames) { testutil.Fatalf(t, "the service account list doesn't match the expected list: %s", expectedNames) } @@ -68,7 +67,7 @@ func TestServiceAccountsMatch(t *testing.T) { } saNamesPSP := strings.Split(res, " ") // disregard `linkerd-heartbeat` - if len(saNamesPSP)-1 != len(expectedNames) || !namesMatch(saNamesPSP) { + if len(saNamesPSP) < len(expectedNames) || !namesMatch(saNamesPSP) { t.Fatalf( "The service accounts in the linkerd-psp rolebindings don't match the expected list: %s", expectedNames) From d54ad31e586dca2be157657b8183d295c26f1274 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 11 May 2020 16:07:00 +0530 Subject: [PATCH 10/42] add more fields for configuration Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 30 +++++++++++-------- charts/linkerd2/values.yaml | 25 +++++++--------- cli/cmd/install.go | 2 +- cli/cmd/install_test.go | 7 ++--- .../install_addon_control-plane.golden | 16 +++++++--- cli/cmd/testdata/install_control-plane.golden | 16 +++++++--- ...install_controlplane_tracing_output.golden | 16 +++++++--- .../testdata/install_custom_registry.golden | 16 +++++++--- cli/cmd/testdata/install_default.golden | 16 +++++++--- cli/cmd/testdata/install_ha_output.golden | 16 +++++++--- .../install_ha_with_overrides_output.golden | 16 +++++++--- .../install_heartbeat_disabled_output.golden | 16 +++++++--- cli/cmd/testdata/install_helm_output.golden | 16 +++++++--- .../install_helm_output_addons.golden | 16 +++++++--- .../testdata/install_helm_output_ha.golden | 16 +++++++--- .../testdata/install_no_init_container.golden | 16 +++++++--- cli/cmd/testdata/install_output.golden | 10 +------ cli/cmd/testdata/install_proxy_ignores.golden | 16 +++++++--- .../install_restricted_dashboard.golden | 16 +++++++--- cli/cmd/testdata/install_tracing.golden | 16 +++++++--- .../upgrade_add-on_controlplane.golden | 16 +++++++--- .../testdata/upgrade_add-on_overwrite.golden | 16 +++++++--- cli/cmd/testdata/upgrade_add_add-on.golden | 16 +++++++--- cli/cmd/testdata/upgrade_default.golden | 16 +++++++--- .../testdata/upgrade_external_issuer.golden | 16 +++++++--- cli/cmd/testdata/upgrade_ha.golden | 16 +++++++--- .../testdata/upgrade_overwrite_issuer.golden | 16 +++++++--- ...write_trust_anchors-external-issuer.golden | 16 +++++++--- .../upgrade_overwrite_trust_anchors.golden | 16 +++++++--- 29 files changed, 321 insertions(+), 137 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 8f2b37fe51431..53a1dff22fdc3 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -15,15 +15,10 @@ metadata: {{.Values.global.createdByAnnotation}}: {{default (printf "linkerd/helm %s" .Values.global.linkerdVersion) 
.Values.global.cliVersion}} data: prometheus.yml: |- - {{- if .Values.alertmanagers }} - alerting: - alertmanagers: - {{- toYaml .Values.alertmanagers | trim | nindent 8 }} - {{- end }} + {{ if .Values.globalConfig -}} global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + {{- toYaml .Values.globalConfig | trim | nindent 6 }} + {{- end}} rule_files: - /etc/prometheus/*_rules.yml @@ -151,6 +146,21 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + + {{- if .Values.scrapeConfigs }} + {{ toYaml .Values.scrapeConfigs | trim | nindent 4 }} + {{ end }} + + {{- if .Values.alertManagers }} + alerting: + alertmanagers: + {{- toYaml .Values.alertManagers | trim | nindent 6 }} + {{- end }} + + {{- if .Values.remoteWrite }} + remote_write: + {{- toYaml .Values.remoteWrite | trim | nindent 4 }} + {{- end }} --- kind: Service apiVersion: v1 @@ -211,10 +221,6 @@ spec: {{- include "linkerd.node-selector" . | nindent 6 }} containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level={{lower .Values.logLevel}} {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index a9b6f16f08ba5..8dc2ddb2cadbc 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -175,26 +175,21 @@ smiMetrics: keyPEM: | # Configuration for Add-ons +# Full configuration fields https://github.com/linkerd/linkerd2/tree/master/charts/linkerd2#add-ons-configuration prometheus: enabled: true name: linkerd-prometheus image: prom/prometheus:v2.15.2 - logLevel: *controller_log_level - # args: - # - log.format: json - # alertManagers: - # - scheme: http - # static_configs: - # - targets: - # - "alertmanager.linkerd.svc:9093" - # ruleConfigMapMounts: - # - name: alerting-rules - # subPath: alerting_rules.yml - # configMap: linkerd-prometheus-rules - # - name: recording-rules - # subPath: recording_rules.yml - # configMap: linkerd-prometheus-rules + args: + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + config.file: /etc/prometheus/prometheus.yml + log.level: *controller_log_level + globalConfig: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s tracing: enabled: false diff --git a/cli/cmd/install.go b/cli/cmd/install.go index c60ef4f2d53b9..0c0b89d47221f 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -770,7 +770,7 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Global.Namespace = controlPlaneNamespace installValues.Global.CNIEnabled = options.cniEnabled installValues.OmitWebhookSideEffects = options.omitWebhookSideEffects - installValues.Prometheus["logLevel"] = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) + installValues.Prometheus["args"].(map[string]interface{})["log.level"] = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) installValues.HeartbeatSchedule = options.heartbeatSchedule() installValues.RestrictDashboardPrivileges = options.restrictDashboardPrivileges installValues.DisableHeartBeat = options.disableHeartbeat diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index d2cc878f1ea90..bea1d1550fb5f 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -130,10 +130,9 @@ func TestRender(t *testing.T) { Replicas: 1, }, Prometheus: charts.Prometheus{ - "enabled": true, - "image": 
"PrometheusImage", - "name": "linkerd-prometheus", - "logLevel": "info", + "enabled": true, + "image": "PrometheusImage", + "name": "linkerd-prometheus", }, Tracing: map[string]interface{}{ "enabled": false, diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index da245d96ec899..03226a4c4998e 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1925,9 +1925,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -2408,9 +2416,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2594,10 +2602,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index ea6c2c1c1639c..65ee644b4b361 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1924,9 +1924,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -1948,9 +1956,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2134,10 +2142,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 2538fc69880ca..27ac45368aa6b 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -2840,9 +2840,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2905,9 +2913,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3091,10 
+3099,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index a399006ecdaf9..e337af1c2d334 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2723,9 +2723,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2788,9 +2796,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2974,10 +2982,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index d7cf42e45bbb1..a7d3de7ca9ac9 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2723,9 +2723,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2788,9 +2796,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2974,10 +2982,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 6eaf948f7bb3b..4dc818fb96796 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -2972,9 +2972,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus resources: cpu: @@ -3044,9 +3052,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - 
/etc/prometheus/*_rules.yml @@ -3230,10 +3238,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index e83eb9c446248..f5b446f3cdbcb 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -2972,9 +2972,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus resources: cpu: @@ -3044,9 +3052,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3230,10 +3238,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index db4ebacae6d83..cd6ddbac14731 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2634,9 +2634,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2699,9 +2707,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2885,10 +2893,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index e735aecaba502..acc6e1e4de7a6 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2764,9 +2764,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2833,9 +2841,9 @@ metadata: data: 
prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3019,10 +3027,10 @@ spec: null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 670d48367fa87..c4d3b6465ff0c 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2765,9 +2765,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -3305,9 +3313,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3491,10 +3499,10 @@ spec: null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index d92dca85c7482..da43938c5217b 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -3013,9 +3013,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus resources: cpu: @@ -3089,9 +3097,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3275,10 +3283,10 @@ spec: null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 81f8a6ef74ef5..3f0985e13dea5 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2456,9 +2456,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2521,9 +2529,9 @@ 
metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2707,10 +2715,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 5f17742d55b86..97fb395e01367 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2717,7 +2717,6 @@ data: prometheus: enabled: true image: PrometheusImage - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2779,10 +2778,7 @@ metadata: CreatedByAnnotation: CliVersion data: prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s + rule_files: - /etc/prometheus/*_rules.yml @@ -2966,10 +2962,6 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: PrometheusImage imagePullPolicy: ImagePullPolicy livenessProbe: diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 8c6c2dff0a2b2..84bf7fd884bf2 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2723,9 +2723,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2788,9 +2796,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2974,10 +2982,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index e2765bec1b164..e61ba78b54cb9 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2658,9 +2658,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2723,9 +2731,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2909,10 +2917,10 @@ spec: beta.kubernetes.io/os: linux 
containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index de1cd08a71f4e..14e3406406a72 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2724,9 +2724,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -3274,9 +3282,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3460,10 +3468,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 62eacf8c83619..eeb283e1cca53 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1941,9 +1941,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -2428,9 +2436,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2614,10 +2622,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index b4faecc8f65e8..ec873fa4580f7 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2740,9 +2740,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -3292,9 +3300,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml 
@@ -3478,10 +3486,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 600daef974b85..45558c8b53c1d 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2740,9 +2740,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: collector: @@ -3294,9 +3302,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3480,10 +3488,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index e1ecaa3627440..95bd0c0d7f136 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2739,9 +2739,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2804,9 +2812,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2990,10 +2998,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 9f6cab9736a5d..f833e94e4642b 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2725,9 +2725,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2790,9 +2798,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - 
/etc/prometheus/*_rules.yml @@ -2976,10 +2984,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 8d28a4040916c..26ef66a2997cc 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -2988,9 +2988,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus resources: cpu: @@ -3060,9 +3068,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3246,10 +3254,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 9d6c43f55ce35..11b05e7a1701c 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2723,9 +2723,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2788,9 +2796,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2974,10 +2982,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 8ab28e5d07b99..64d3e9ee12d38 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2709,9 +2709,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2774,9 +2782,9 @@ metadata: data: 
prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2960,10 +2968,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 9d6c43f55ce35..11b05e7a1701c 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2723,9 +2723,17 @@ metadata: data: values: |- prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s image: prom/prometheus:v2.15.2 - logLevel: info name: linkerd-prometheus tracing: enabled: false @@ -2788,9 +2796,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2974,10 +2982,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: From 92a72cb750d664a16c3e42a668d5db9bef60284a Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 11 May 2020 16:35:49 +0530 Subject: [PATCH 11/42] add extra prometheus config tests Signed-off-by: Tarun Pothulapati --- cli/cmd/install_addon_test.go | 10 + cli/cmd/install_test.go | 4 +- .../install_prometheus_overwrite.golden | 3192 +++++++++++++++++ cli/cmd/testdata/prom-config.yaml | 26 + pkg/charts/linkerd2/values_test.go | 36 +- 5 files changed, 3258 insertions(+), 10 deletions(-) create mode 100644 cli/cmd/testdata/install_prometheus_overwrite.golden create mode 100644 cli/cmd/testdata/prom-config.yaml diff --git a/cli/cmd/install_addon_test.go b/cli/cmd/install_addon_test.go index 9141b5c8863e4..1df4e56205db7 100644 --- a/cli/cmd/install_addon_test.go +++ b/cli/cmd/install_addon_test.go @@ -3,6 +3,7 @@ package cmd import ( "bytes" "fmt" + "path/filepath" "reflect" "testing" @@ -20,11 +21,20 @@ func TestAddOnRender(t *testing.T) { withTracingAddonValues.Tracing["enabled"] = true addFakeTLSSecrets(withTracingAddonValues) + withGrafanaAddOnOverwrite, err := testInstallOptions() + if err != nil { + t.Fatalf("Unexpected error: %v\n", err) + } + withGrafanaAddOnOverwrite.addOnConfig = filepath.Join("testdata", "prom-config.yaml") + withGrafanaAddOnOverwriteValues, _, _ := withGrafanaAddOnOverwrite.validateAndBuild("", nil) + addFakeTLSSecrets(withGrafanaAddOnOverwriteValues) + testCases := []struct { values *charts.Values goldenFileName string }{ {withTracingAddonValues, "install_tracing.golden"}, + {withGrafanaAddOnOverwriteValues, "install_prometheus_overwrite.golden"}, } for i, tc := range testCases { diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index bea1d1550fb5f..c4c091a0df48d 100644 --- a/cli/cmd/install_test.go +++ 
b/cli/cmd/install_test.go @@ -363,8 +363,8 @@ func TestValidate(t *testing.T) { t.Fatalf("Unexpected error occurred %s", err) } - if actual.Prometheus["logLevel"] != expected { - t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.Prometheus["logLevel"]) + if actual.Prometheus["args"].(map[string]interface{})["log.level"] != expected { + t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.Prometheus["args"].(map[string]interface{})["log.level"]) } }) diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden new file mode 100644 index 0000000000000..3eb2de026e527 --- /dev/null +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -0,0 +1,3192 @@ +--- +### +### Linkerd Namespace +### +--- +kind: Namespace +apiVersion: v1 +metadata: + name: linkerd + annotations: + linkerd.io/inject: disabled + labels: + linkerd.io/is-control-plane: "true" + config.linkerd.io/admission-webhooks: disabled + linkerd.io/control-plane-ns: linkerd +--- +### +### Identity Controller Service RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-identity + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-identity +subjects: +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd +--- +### +### Controller RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list" , "get", "watch"] +- apiGroups: [""] + resources: ["pods", "endpoints", "services", "replicationcontrollers", "namespaces"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-controller + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-controller +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-controller + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + 
linkerd.io/control-plane-ns: linkerd +--- +### +### Destination Controller Service +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods", "endpoints", "services"] + verbs: ["list", "get", "watch"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list", "get", "watch"] +- apiGroups: ["split.smi-spec.io"] + resources: ["trafficsplits"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-destination + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-destination +subjects: +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-destination + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd +--- +### +### Heartbeat RBAC +### +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["linkerd-config"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-heartbeat + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + linkerd.io/control-plane-component: heartbeat + linkerd.io/control-plane-ns: linkerd +--- +### +### Web RBAC +### +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + resourceNames: ["linkerd-config"] +- apiGroups: [""] + resources: ["namespaces", "configmaps"] + verbs: ["get"] +- apiGroups: [""] + resources: ["serviceaccounts", "pods"] + verbs: ["list"] +- apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-web + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: linkerd-linkerd-web-check + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "clusterrolebindings"] + verbs: ["list"] +- apiGroups: ["apiextensions.k8s.io"] + resources: 
["customresourcedefinitions"] + verbs: ["list"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["list"] +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: ["list"] +- apiGroups: ["linkerd.io"] + resources: ["serviceprofiles"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: linkerd-linkerd-web-check + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: ClusterRole + name: linkerd-linkerd-web-check + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-web-admin + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap-admin +subjects: +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd +--- +### +### Service Profile CRD +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serviceprofiles.linkerd.io + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: linkerd.io + versions: + - name: v1alpha1 + served: true + storage: false + - name: v1alpha2 + served: true + storage: true + scope: Namespaced + names: + plural: serviceprofiles + singular: serviceprofile + kind: ServiceProfile + shortNames: + - sp +--- +### +### TrafficSplit CRD +### Copied from https://github.com/deislabs/smi-sdk-go/blob/cea7e1e9372304bbb6c74a3f6ca788d9eaa9cc58/crds/split.yaml +### +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: trafficsplits.split.smi-spec.io + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + linkerd.io/control-plane-ns: linkerd +spec: + group: split.smi-spec.io + version: v1alpha1 + scope: Namespaced + names: + kind: TrafficSplit + shortNames: + - ts + plural: trafficsplits + singular: trafficsplit + additionalPrinterColumns: + - name: Service + type: string + description: The apex service of this split. 
+ JSONPath: .spec.service +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Proxy Injector RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["namespaces", "replicationcontrollers"] + verbs: ["list", "get", "watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets", "daemonsets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list", "get", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-proxy-injector + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-proxy-injector + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-proxy-injector-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +type: Opaque +data: + crt.pem: cHJveHkgaW5qZWN0b3IgY3J0 + key.pem: cHJveHkgaW5qZWN0b3Iga2V5 +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: linkerd-proxy-injector-webhook-config + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-proxy-injector.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-proxy-injector + namespace: linkerd + path: "/" + caBundle: cHJveHkgaW5qZWN0b3IgY3J0 + failurePolicy: Ignore + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + sideEffects: None +--- +### +### Service Profile Validator RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-sp-validator + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +subjects: +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd + apiGroup: "" +roleRef: + kind: ClusterRole + name: linkerd-linkerd-sp-validator + apiGroup: rbac.authorization.k8s.io +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: 
linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-sp-validator-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +type: Opaque +data: + crt.pem: cHJveHkgaW5qZWN0b3IgY3J0 + key.pem: cHJveHkgaW5qZWN0b3Iga2V5 +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: linkerd-sp-validator-webhook-config + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd +webhooks: +- name: linkerd-sp-validator.linkerd.io + namespaceSelector: + matchExpressions: + - key: config.linkerd.io/admission-webhooks + operator: NotIn + values: + - disabled + clientConfig: + service: + name: linkerd-sp-validator + namespace: linkerd + path: "/" + caBundle: cHJveHkgaW5qZWN0b3IgY3J0 + failurePolicy: Ignore + rules: + - operations: [ "CREATE" , "UPDATE" ] + apiGroups: ["linkerd.io"] + apiVersions: ["v1alpha1", "v1alpha2"] + resources: ["serviceprofiles"] + sideEffects: None +--- +### +### Tap RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["pods", "services", "replicationcontrollers", "namespaces", "nodes"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["list", "get", "watch"] +- apiGroups: ["extensions", "batch"] + resources: ["cronjobs", "jobs"] + verbs: ["list" , "get", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap-admin + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ["tap.linkerd.io"] + resources: ["*"] + verbs: ["watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-tap + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-tap +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: linkerd-linkerd-tap-auth-delegator + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-linkerd-tap-auth-reader + namespace: kube-system + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +--- +kind: Secret 
+apiVersion: v1 +metadata: + name: linkerd-tap-tls + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +type: Opaque +data: + crt.pem: dGFwIGNydA== + key.pem: dGFwIGtleQ== +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1alpha1.tap.linkerd.io + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd +spec: + group: tap.linkerd.io + version: v1alpha1 + groupPriorityMinimum: 1000 + versionPriority: 100 + service: + name: linkerd-tap + namespace: linkerd + caBundle: dGFwIGNydA== +--- +### +### Control Plane PSP +### +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: linkerd-linkerd-control-plane + labels: + linkerd.io/control-plane-ns: linkerd +spec: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + allowedCapabilities: + - NET_ADMIN + - NET_RAW + requiredDropCapabilities: + - ALL + hostNetwork: false + hostIPC: false + hostPID: false + seLinux: + rule: RunAsAny + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + volumes: + - configMap + - emptyDir + - secret + - projected + - downwardAPI + - persistentVolumeClaim +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: ['policy', 'extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - linkerd-linkerd-control-plane +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: linkerd-psp + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd +roleRef: + kind: Role + name: linkerd-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: linkerd-controller + namespace: linkerd +- kind: ServiceAccount + name: linkerd-destination + namespace: linkerd +- kind: ServiceAccount + name: linkerd-grafana + namespace: linkerd +- kind: ServiceAccount + name: linkerd-heartbeat + namespace: linkerd +- kind: ServiceAccount + name: linkerd-identity + namespace: linkerd +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +- kind: ServiceAccount + name: linkerd-proxy-injector + namespace: linkerd +- kind: ServiceAccount + name: linkerd-sp-validator + namespace: linkerd +- kind: ServiceAccount + name: linkerd-tap + namespace: linkerd +- kind: ServiceAccount + name: linkerd-web + namespace: linkerd + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + global: | + {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN 
CERTIFICATE-----\nMIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy\nLmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE\nAxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0\nxtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364\n6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF\nBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE\nAiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv\nOLO4Zsk1XrGZHGsmyiEyvYF9lpY=\n-----END CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s","scheme":"linkerd.io/tls"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} + proxy: | + {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.2","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"} + install: | + {"cliVersion":"dev-undefined","flags":[]} +--- +### +### Identity Controller Service +### +--- +kind: Secret +apiVersion: v1 +metadata: + name: linkerd-identity-issuer + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-issuer-expiry: 2029-02-28T02:03:52Z +data: + crt.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJjakNDQVJpZ0F3SUJBZ0lCQWpBS0JnZ3Foa2pPUFFRREFqQVlNUll3RkFZRFZRUURFdzFqYkhWemRHVnkKTG14dlkyRnNNQjRYRFRFNU1ETXdNekF4TlRrMU1sb1hEVEk1TURJeU9EQXlNRE0xTWxvd0tURW5NQ1VHQTFVRQpBeE1lYVdSbGJuUnBkSGt1YkdsdWEyVnlaQzVqYkhWemRHVnlMbXh2WTJGc01Ga3dFd1lIS29aSXpqMENBUVlJCktvWkl6ajBEQVFjRFFnQUVJU2cwQ21KTkJXTHhKVHNLdDcrYno4QXMxWWZxWkZ1VHEyRm5ZbzAxNk5LVnY3MGUKUUMzVDZ0T3Bhajl4dUtzWGZsVTZaa3VpVlJpaWh3K3RWMmlzcTZOQ01FQXdEZ1lEVlIwUEFRSC9CQVFEQWdFRwpNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBUEJnTlZIUk1CQWY4RUJUQURBUUgvCk1Bb0dDQ3FHU000OUJBTUNBMGdBTUVVQ0lGK2FNMEJ3MlBkTUZEcS9LdGFCUXZIZEFZYVVQVng4dmYzam4rTTQKQWFENEFpRUE5SEJkanlXeWlLZUt4bEE4Q29PdlVBd0k5NXhjNlhVTW9EeFJTWGpucFhnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + key.pem: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU1JSnltZWtZeitra0NMUGtGbHJVeUF1L2NISllSVHl3Zm1BVVJLS1JYZHpvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFSVNnMENtSk5CV0x4SlRzS3Q3K2J6OEFzMVlmcVpGdVRxMkZuWW8wMTZOS1Z2NzBlUUMzVAo2dE9wYWo5eHVLc1hmbFU2Wmt1aVZSaWlodyt0VjJpc3F3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQ== +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-identity + namespace: linkerd + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: identity + ports: + - name: grpc + port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: identity + app.kubernetes.io/part-of: Linkerd + 
app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + name: linkerd-identity + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: identity + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-identity + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - identity + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9990 + initialDelaySeconds: 10 + name: identity + ports: + - containerPort: 8080 + name: grpc + - containerPort: 9990 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9990 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/identity/issuer + name: identity-issuer + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: localhost.:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-identity + volumes: + - configMap: + name: linkerd-config + name: config + - name: identity-issuer + secret: + secretName: linkerd-identity-issuer + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Controller +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-controller-api + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: controller + ports: + - name: http + port: 8085 + targetPort: 8085 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + name: linkerd-controller + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: controller + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-controller + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - public-api + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 + - -controller-namespace=linkerd + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: public-api + ports: + - containerPort: 8085 + name: http + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-controller + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Destination Controller Service +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-dst + namespace: linkerd + labels: + linkerd.io/control-plane-component: destination + 
linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: destination + ports: + - name: grpc + port: 8086 + targetPort: 8086 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: destination + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + name: linkerd-destination + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: destination + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-destination + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - destination + - -addr=:8086 + - -controller-namespace=linkerd + - -enable-h2-upgrade=true + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9996 + initialDelaySeconds: 10 + name: destination + ports: + - containerPort: 8086 + name: grpc + - containerPort: 9996 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9996 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: localhost.:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-destination + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Heartbeat +### +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: linkerd-heartbeat + namespace: linkerd + labels: + app.kubernetes.io/name: heartbeat + 
app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: heartbeat + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + schedule: "1 2 3 4 5" + successfulJobsHistoryLimit: 0 + jobTemplate: + spec: + template: + metadata: + labels: + linkerd.io/control-plane-component: heartbeat + linkerd.io/workload-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + spec: + nodeSelector: + beta.kubernetes.io/os: linux + serviceAccountName: linkerd-heartbeat + restartPolicy: Never + containers: + - name: heartbeat + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + args: + - "heartbeat" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" + - "-controller-namespace=linkerd" + - "-log-level=info" + securityContext: + runAsUser: 2103 +--- +### +### Web +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-web + namespace: linkerd + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: web + ports: + - name: http + port: 8084 + targetPort: 8084 + - name: admin-http + port: 9994 + targetPort: 9994 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: web + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + name: linkerd-web + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-web + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: web + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-web + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 + - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 + - -controller-namespace=linkerd + - -log-level=info + - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ + image: gcr.io/linkerd-io/web:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9994 + initialDelaySeconds: 10 + name: web + ports: + - containerPort: 8084 + name: http + - containerPort: 9994 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9994 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + 
value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-web + volumes: + - configMap: + 
name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: 
LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: 
+ items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Proxy Injector +### +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: proxy-injector + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + name: linkerd-proxy-injector + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: proxy-injector + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-proxy-injector + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector + ports: + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9995 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-proxy-injector + volumes: + - configMap: + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +### +### Service Profile Validator +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: sp-validator + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-sp-validator + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - sp-validator + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9997 + initialDelaySeconds: 10 + name: sp-validator + ports: + - containerPort: 8443 + name: sp-validator + - containerPort: 9997 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9997 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-sp-validator + volumes: + - name: tls + secret: + secretName: linkerd-sp-validator-tls + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### Tap +### +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-tap + namespace: linkerd + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: 
linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: tap + ports: + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver + port: 443 + targetPort: apiserver +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: tap + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + name: linkerd-tap + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - tap + - -controller-namespace=linkerd + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping + port: 9998 + initialDelaySeconds: 10 + name: tap + ports: + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 + name: admin-http + readinessProbe: + failureThreshold: 7 + httpGet: + path: /ready + port: 9998 + securityContext: + runAsUser: 2103 + volumeMounts: + - mountPath: /var/run/linkerd/tls + name: tls + readOnly: true + - mountPath: /var/run/linkerd/config + name: config + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-tap + volumes: + - configMap: + name: linkerd-config + name: config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + +--- +### +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: 
+ linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + prometheus: + alertManagers: + - scheme: http + static_configs: + - targets: + - alertmanager.linkerd.svc:9093 + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + query_log_file: /queries.log + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + remoteWrite: + - url: http://cortex-service.default:9009/api/prom/push + scrapeConfigs: + - azure_sd_configs: + - authentication_method: OAuth + client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + environment: AzurePublicCloud + port: 9100 + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + job_name: service-azure + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + query_log_file: /queries.log + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: 
[__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) + + - azure_sd_configs: + - authentication_method: OAuth + client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + environment: AzurePublicCloud + port: 9100 + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + job_name: service-azure + + alerting: + alertmanagers: + - scheme: http + static_configs: + - targets: + - alertmanager.linkerd.svc:9093 + remote_write: + - url: http://cortex-service.default:9009/api/prom/push +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + 
annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/prom-config.yaml b/cli/cmd/testdata/prom-config.yaml new 
file mode 100644 index 0000000000000..cabdef8ef048f --- /dev/null +++ b/cli/cmd/testdata/prom-config.yaml @@ -0,0 +1,26 @@ +prometheus: + globalConfig: + evaluation_interval: 2m + query_log_file: /queries.log + external_labels: + cluster: cluster-1 + + scrapeConfigs: + - job_name: service-azure + azure_sd_configs: + - environment: AzurePublicCloud + authentication_method: OAuth + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + port: 9100 + + alertManagers: + - scheme: http + static_configs: + - targets: + - "alertmanager.linkerd.svc:9093" + + remoteWrite: + - url: http://cortex-service.default:9009/api/prom/push diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index 64985cb2d83d8..8b095a01b8eb2 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -31,10 +31,20 @@ func TestNewValues(t *testing.T) { HeartbeatSchedule: "0 0 * * *", InstallNamespace: true, Prometheus: Prometheus{ - "enabled": true, - "name": "linkerd-prometheus", - "image": "prom/prometheus:v2.15.2", - "logLevel": "info", + "enabled": true, + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", + "args": map[string]interface{}{ + "log.level": "info", + "config.file": "/etc/prometheus/prometheus.yml", + "storage.tsdb.path": "/data", + "storage.tsdb.retention.time": "6h", + }, + "globalConfig": map[string]interface{}{ + "evaluation_interval": "10s", + "scrape_interval": "10s", + "scrape_timeout": "10s", + }, }, Global: &Global{ Namespace: "linkerd", @@ -202,10 +212,20 @@ func TestNewValues(t *testing.T) { } expected.Prometheus = Prometheus{ - "enabled": true, - "logLevel": "info", - "name": "linkerd-prometheus", - "image": "prom/prometheus:v2.15.2", + "enabled": true, + "name": "linkerd-prometheus", + "image": "prom/prometheus:v2.15.2", + "args": map[string]interface{}{ + "log.level": "info", + "config.file": "/etc/prometheus/prometheus.yml", + "storage.tsdb.path": "/data", + "storage.tsdb.retention.time": "6h", + }, + "globalConfig": map[string]interface{}{ + "evaluation_interval": "10s", + "scrape_interval": "10s", + "scrape_timeout": "10s", + }, "resources": map[string]interface{}{ "cpu": map[string]interface{}{ "limit": "4", From f8152deec19a209d351482f5333ecf2413426464 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 11 May 2020 17:52:22 +0530 Subject: [PATCH 12/42] fix prom example config Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_prometheus_overwrite.golden | 2 -- cli/cmd/testdata/prom-config.yaml | 1 - 2 files changed, 3 deletions(-) diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 3eb2de026e527..e132992b4f665 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2738,7 +2738,6 @@ data: evaluation_interval: 2m external_labels: cluster: cluster-1 - query_log_file: /queries.log scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 @@ -2819,7 +2818,6 @@ data: evaluation_interval: 2m external_labels: cluster: cluster-1 - query_log_file: /queries.log scrape_interval: 10s scrape_timeout: 10s diff --git a/cli/cmd/testdata/prom-config.yaml b/cli/cmd/testdata/prom-config.yaml index cabdef8ef048f..0c6c7df116327 100644 --- a/cli/cmd/testdata/prom-config.yaml +++ b/cli/cmd/testdata/prom-config.yaml @@ -1,7 +1,6 @@ 
prometheus: globalConfig: evaluation_interval: 2m - query_log_file: /queries.log external_labels: cluster: cluster-1 From 4fb9c383fc9c1c947cb7722a228d9bcdcd195475 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 12 May 2020 01:20:43 +0530 Subject: [PATCH 13/42] update golden files Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_addon_config.golden | 100 +- .../install_addon_control-plane.golden | 1111 ++++++------- cli/cmd/testdata/install_config.golden | 82 +- cli/cmd/testdata/install_control-plane.golden | 648 ++++---- ...install_controlplane_tracing_output.golden | 717 ++++----- .../testdata/install_custom_registry.golden | 834 +++++----- cli/cmd/testdata/install_default.golden | 834 +++++----- cli/cmd/testdata/install_ha_output.golden | 935 ++++++----- .../install_ha_with_overrides_output.golden | 935 ++++++----- .../install_heartbeat_disabled_output.golden | 834 +++++----- .../testdata/install_no_init_container.golden | 833 +++++----- cli/cmd/testdata/install_output.golden | 702 ++++----- .../install_prometheus_overwrite.golden | 592 +++---- cli/cmd/testdata/install_proxy_ignores.golden | 834 +++++----- .../install_restricted_dashboard.golden | 834 +++++----- cli/cmd/testdata/install_tracing.golden | 1370 ++++++++-------- cli/cmd/testdata/upgrade_add-on_config.golden | 100 +- .../upgrade_add-on_controlplane.golden | 1111 ++++++------- .../testdata/upgrade_add-on_overwrite.golden | 1376 +++++++++-------- cli/cmd/testdata/upgrade_add_add-on.golden | 1372 ++++++++-------- cli/cmd/testdata/upgrade_default.golden | 834 +++++----- .../testdata/upgrade_external_issuer.golden | 834 +++++----- .../testdata/upgrade_grafana_disabled.yaml | 735 ++++----- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 881 +++++------ .../upgrade_grafana_enabled_disabled.yaml | 735 ++++----- .../testdata/upgrade_grafana_overwrite.yaml | 881 +++++------ cli/cmd/testdata/upgrade_ha.golden | 935 ++++++----- cli/cmd/testdata/upgrade_ha_config.golden | 82 +- cli/cmd/testdata/upgrade_nothing_addon.yaml | 881 +++++------ .../testdata/upgrade_overwrite_issuer.golden | 834 +++++----- ...write_trust_anchors-external-issuer.golden | 834 +++++----- .../upgrade_overwrite_trust_anchors.golden | 834 +++++----- 32 files changed, 13482 insertions(+), 11972 deletions(-) diff --git a/cli/cmd/testdata/install_addon_config.golden b/cli/cmd/testdata/install_addon_config.golden index 651d3003338ae..3f594e251f327 100644 --- a/cli/cmd/testdata/install_addon_config.golden +++ b/cli/cmd/testdata/install_addon_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- 
-### ### Proxy Injector RBAC ### --- @@ -827,40 +786,81 @@ subjects: --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-collector RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd --- ### -### Grafana RBAC +### linkerd-jaeger RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 58547d563ba8a..776a8b2ead5bf 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -961,192 +961,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - 
targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1154,45 +989,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1223,8 +1055,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1319,39 +1149,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: 
proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -1359,40 +1228,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -1519,78 +1386,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: 
linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -1598,41 +1448,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1756,61 +1611,163 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + collector: + image: omnition/opencensus-collector:0.1.11 + name: linkerd-collector + enabled: true + jaeger: + image: 
jaegertracing/all-in-one:1.17.1 + name: linkerd-jaeger +--- +### +### Grafana ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1818,46 +1775,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - 
containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1981,101 +1932,190 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: - - configMap: - name: linkerd-config - name: config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - collector: - image: omnition/opencensus-collector:0.1.11 - name: linkerd-collector - enabled: true - jaeger: - image: jaegertracing/all-in-one:1.17.1 - name: linkerd-jaeger + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity --- ### -### Tracing Collector Service +### Prometheus ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: 
[__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - 
linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2083,61 +2123,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2167,6 +2213,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2261,41 +2309,71 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- 
### -### Tracing Jaeger Service +### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -2303,20 +2381,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -2327,22 +2407,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2466,95 +2559,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: 
linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -2562,61 +2601,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 
imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2646,8 +2670,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2742,20 +2764,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_config.golden b/cli/cmd/testdata/install_config.golden index 7ad4db389003a..2f5b9ccd80c68 100644 --- a/cli/cmd/testdata/install_config.golden +++ b/cli/cmd/testdata/install_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -838,3 +797,44 @@ metadata: labels: linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: 
linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index 8d2fb5c998f5c..2617cd30f6220 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -960,108 +960,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1069,39 +988,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: 
IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1226,46 +1148,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -1273,40 +1227,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: 
gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -1433,78 +1385,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -1512,41 +1447,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 
10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1670,61 +1610,157 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - 
app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1732,46 +1768,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1895,22 +1925,25 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration -### ### Prometheus ### --- @@ -2058,106 +2091,24 @@ data: - action: labelmap regex: __tmp_pod_label_(.+) --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false ---- -### -### Grafana -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - 
grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2165,20 +2116,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2186,39 +2137,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: 
prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2345,20 +2302,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 2e8dbd53e20bf..13770077ef812 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1847,108 +1806,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - 
selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1956,39 +1834,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2121,20 +2002,14 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - downwardAPI: items: - fieldRef: @@ -2145,28 +2020,66 @@ spec: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: 
+ name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2174,40 +2087,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2342,14 +2253,11 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - downwardAPI: items: - fieldRef: @@ -2360,66 +2268,52 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + 
linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2427,41 +2321,47 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info + - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2593,11 +2493,11 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - downwardAPI: items: - fieldRef: @@ -2607,53 +2507,162 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: 
+ name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2661,47 +2670,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 
+ name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2833,11 +2835,20 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - downwardAPI: items: - fieldRef: @@ -2847,36 +2858,8 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false ---- -### -### Grafana RBAC -### ### Prometheus RBAC ### --- @@ -3065,97 +3048,24 @@ data: - action: labelmap regex: __tmp_pod_label_(.+) --- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -3163,20 +3073,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3184,39 +3094,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3351,20 +3267,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - downwardAPI: items: - fieldRef: diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 293f7168ded79..4267cdf62b69e 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: 
prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1787,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1896,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector 
spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: my.custom.registry/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2053,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2100,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: 
sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2260,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2339,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: 
linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: my.custom.registry/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: 
tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: my.custom.registry/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2722,136 +2724,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - 
namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: my.custom.registry/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: 
[__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2859,20 +2956,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: 
grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2880,39 +2977,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: my.custom.registry/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3039,20 +3142,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 60271f0f31340..57d4719a0168c 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - 
linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1787,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1896,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: 
httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2053,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2100,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: 
gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2260,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2339,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 
10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - 
targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2722,136 +2724,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + 
resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + 
kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2859,20 +2956,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2880,39 +2977,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3039,20 +3142,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index a9137e2f9dab2..9f5e7bcd90639 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - 
linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1915,108 +1874,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2024,46 +1905,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: 
gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2194,46 +2098,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined + name: linkerd-sp-validator + namespace: linkerd labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2244,10 
+2180,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2260,7 +2196,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2269,30 +2205,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2303,8 +2239,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2437,78 +2371,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: 
linkerd-tap namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2519,10 +2436,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2535,7 +2452,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2544,30 +2461,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2581,6 +2501,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2710,64 +2632,184 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + resources: + cpu: + limit: "1" + request: 100m + memory: + limit: 1024Mi + request: 50Mi + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + 
name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: - replicas: 3 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2775,73 +2817,47 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: 
GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 resources: limits: cpu: "1" - memory: "250Mi" + memory: "1024Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2971,143 +2987,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - resources: - cpu: - limit: "1" - request: 100m - memory: - limit: 1024Mi - request: 50Mi - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + 
evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # 
k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -3115,20 +3219,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3136,46 +3240,52 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + 
- containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "1024Mi" + cpu: "4" + memory: "8192Mi" requests: cpu: "300m" memory: "300Mi" securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3308,20 +3418,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index a9e3a8848870f..ed952eccb2017 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1915,108 +1874,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - 
apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2024,46 +1905,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2194,46 +2098,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: 
FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined + name: linkerd-sp-validator + namespace: linkerd labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd spec: replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2244,10 +2180,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2260,7 +2196,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2269,30 +2205,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: 
proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2303,8 +2239,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2437,78 +2371,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 2 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2519,10 +2436,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2535,7 +2452,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2544,30 +2461,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: 
- args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2581,6 +2501,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2710,64 +2632,184 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + resources: + cpu: + limit: "1" + request: 100m + memory: + limit: 1024Mi + request: 50Mi + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: 
linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: - replicas: 2 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2775,73 +2817,47 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 resources: limits: cpu: "1" - memory: "250Mi" + memory: "1024Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2971,143 +2987,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - 
serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - resources: - cpu: - limit: "1" - request: 100m - memory: - limit: 1024Mi - request: 50Mi - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - 
target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: 
__tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -3115,20 +3219,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3136,46 +3240,52 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "1024Mi" + cpu: "4" + memory: "8192Mi" requests: cpu: "300m" memory: "300Mi" securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3308,20 +3418,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: 
linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index b13ba71673c7a..f66bcf019f483 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -336,47 +336,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1698,108 +1657,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector 
namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1807,39 +1685,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1964,46 +1845,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: 
install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2011,40 +1924,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2171,78 +2082,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: 
linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2250,41 +2144,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2408,61 +2307,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: 
+ - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2470,46 +2478,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2633,136 +2635,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + 
items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - 
source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2770,20 +2867,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2791,39 +2888,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2950,20 +3053,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 2eb5eed0f452a..7bd72677993a7 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ 
b/cli/cmd/testdata/install_no_init_container.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1652,108 +1611,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1761,43 +1639,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: 
install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1828,8 +1705,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1891,39 +1766,78 @@ spec: volumeMounts: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: 
sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -1931,40 +1845,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2058,78 +1970,61 @@ spec: volumeMounts: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + 
linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2137,41 +2032,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2262,61 +2162,170 @@ spec: volumeMounts: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: 
http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2324,46 +2333,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2454,136 +2457,231 @@ spec: volumeMounts: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: 
datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: 
'(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + 
linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2591,20 +2689,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2612,39 +2710,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2675,6 +2779,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2736,20 +2842,13 @@ spec: volumeMounts: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory 
name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 7a5fedd3e2c3e..6bd0588e9ef60 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-Namespace-prometheus - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-Namespace-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: Namespace ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: Namespace - labels: - ControllerComponentLabel: prometheus - ControllerNamespaceLabel: Namespace ---- -### ### Proxy Injector RBAC ### --- @@ -1783,108 +1742,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: Namespace - labels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: Namespace - labels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -spec: - type: ClusterIP - selector: - ControllerComponentLabel: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: grafana + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace - name: linkerd-grafana + name: linkerd-proxy-injector namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + ControllerComponentLabel: proxy-injector template: metadata: annotations: @@ -1892,39 +1770,42 @@ spec: 
linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: grafana + ControllerComponentLabel: proxy-injector ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: GrafanaImage:ControllerImageVersion + - args: + - proxy-injector + - -log-level=ControllerLogLevel + image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2048,46 +1929,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: Namespace + labels: + ControllerComponentLabel: proxy-injector + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +spec: + type: ClusterIP + selector: + ControllerComponentLabel: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: Namespace + labels: + ControllerComponentLabel: sp-validator + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +spec: + type: ClusterIP + selector: + ControllerComponentLabel: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator template: metadata: annotations: @@ -2095,40 +2008,38 @@ spec: linkerd.io/identity-mode: default 
linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: proxy-injector + ControllerComponentLabel: sp-validator ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2254,78 +2165,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: Namespace - labels: - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -spec: - type: ClusterIP - selector: - ControllerComponentLabel: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: Namespace labels: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace - name: linkerd-sp-validator + name: linkerd-tap namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap + ControllerNamespaceLabel: Namespace + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2333,41 +2227,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: sp-validator + ControllerComponentLabel: tap ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - 
sp-validator + - tap + - -controller-namespace=Namespace - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2490,61 +2389,161 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: Namespace + labels: + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + enabled: true + image: PrometheusImage + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: Namespace + labels: + ControllerComponentLabel: grafana + ControllerNamespaceLabel: Namespace +--- ### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: Namespace + labels: + ControllerComponentLabel: grafana + ControllerNamespaceLabel: Namespace + annotations: + CreatedByAnnotation: CliVersion +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.Namespace.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: Namespace labels: - ControllerComponentLabel: tap + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: tap + ControllerComponentLabel: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: 
Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: tap + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace - name: linkerd-tap + name: linkerd-grafana namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: tap + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2552,46 +2551,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: tap + ControllerComponentLabel: grafana ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=Namespace - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2714,118 +2707,28 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: Namespace - labels: - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false ---- -### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: Namespace - labels: - ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace --- ### -### Grafana +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: Namespace - labels: - 
ControllerComponentLabel: grafana - ControllerNamespaceLabel: Namespace - annotations: - CreatedByAnnotation: CliVersion -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -3011,21 +2914,21 @@ data: kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: Namespace labels: - ControllerComponentLabel: grafana + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace annotations: CreatedByAnnotation: CliVersion spec: type: ClusterIP selector: - ControllerComponentLabel: grafana + ControllerComponentLabel: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -3033,20 +2936,20 @@ metadata: annotations: CreatedByAnnotation: CliVersion labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: grafana + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace - name: linkerd-grafana + name: linkerd-prometheus namespace: Namespace spec: replicas: 1 selector: matchLabels: - ControllerComponentLabel: grafana + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3054,39 +2957,41 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: ProxyVersion labels: - ControllerComponentLabel: grafana + ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:ControllerImageVersion + - args: + image: PrometheusImage imagePullPolicy: ImagePullPolicy livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: 
- name: LINKERD2_PROXY_LOG @@ -3212,20 +3117,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index e132992b4f665..ef733bbedd1c9 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -377,19 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1759,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: 
proxy-injector template: metadata: annotations: @@ -1868,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2025,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator 
namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2072,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2232,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +### +### Tap +### +--- kind: Service apiVersion: v1 metadata: - name: linkerd-proxy-injector + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: tap ports: - - name: proxy-injector + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: proxy-injector + targetPort: apiserver --- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: 
linkerd-tap template: metadata: annotations: @@ -2311,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2469,61 +2396,189 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + alertManagers: + - scheme: http + static_configs: + - targets: + - alertmanager.linkerd.svc:9093 + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 2m + external_labels: + cluster: cluster-1 + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + remoteWrite: + - url: http://cortex-service.default:9009/api/prom/push + scrapeConfigs: + - azure_sd_configs: + - authentication_method: OAuth + client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + environment: AzurePublicCloud + port: 9100 + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + job_name: service-azure + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + 
root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2531,46 +2586,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: 
LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2694,68 +2743,23 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - prometheus: - alertManagers: - - scheme: http - static_configs: - - targets: - - alertmanager.linkerd.svc:9093 - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h - enabled: true - globalConfig: - evaluation_interval: 2m - external_labels: - cluster: cluster-1 - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 - name: linkerd-prometheus - remoteWrite: - - url: http://cortex-service.default:9009/api/prom/push - scrapeConfigs: - - azure_sd_configs: - - authentication_method: OAuth - client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret - environment: AzurePublicCloud - port: 9100 - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 - tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 - job_name: service-azure - tracing: - enabled: false --- ### ### Prometheus RBAC diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 364bfe83cc4b7..bf8ccdb41053d 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1787,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - 
grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1896,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2053,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: 
linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2100,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2260,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - 
secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2339,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: 
linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: 
linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2722,136 +2724,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: 
linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: 
^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2859,20 +2956,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2880,39 +2977,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml 
+ - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3039,20 +3142,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 3abfe390476c2..3eb24a3f8414e 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -312,47 +312,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1722,108 +1681,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: 
prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1831,39 +1709,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1988,46 +1869,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + 
- name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2035,40 +1948,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2195,78 +2106,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined 
-spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2274,41 +2168,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2432,61 +2331,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + 
image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2494,46 +2502,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap 
- - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2657,136 +2659,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + 
scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: 
[__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2794,20 +2891,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2815,39 +2912,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: 
admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2974,20 +3077,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 5321e71a9cf4e..cf593ea6c1aa3 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1788,108 +1747,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: 
Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1897,39 +1775,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2054,46 +1935,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + 
linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2101,274 +2014,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: @@ -2755,6 +2429,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2765,85 +2452,510 @@ data: name: linkerd-jaeger --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Grafana ### --- -kind: ServiceAccount +kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd ---- -### -### Tracing Collector Service + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = 
linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + 
name: linkerd-identity-end-entity +--- ### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- +kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + 
kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2851,61 +2963,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + 
linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2935,6 +3053,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3029,41 +3149,97 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + 
linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3071,20 +3247,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3095,22 +3273,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3234,108 +3425,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: 
ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3343,61 +3467,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - 
port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3521,20 +3630,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add-on_config.golden b/cli/cmd/testdata/upgrade_add-on_config.golden index d6268abf3adc1..d0ac036d7d232 100644 --- a/cli/cmd/testdata/upgrade_add-on_config.golden +++ b/cli/cmd/testdata/upgrade_add-on_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -827,40 +786,81 @@ subjects: --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: 
linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-collector RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd --- ### -### Grafana RBAC +### linkerd-jaeger RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 3183c726f9b67..d6c6c0eef0be1 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -969,192 +969,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: 
[__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1162,45 +997,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: 
beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -1231,8 +1063,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -1329,39 +1159,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: 
metadata: annotations: @@ -1369,40 +1238,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -1531,78 +1398,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -1610,41 +1460,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1770,61 +1625,163 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + collector: + image: omnition/opencensus-collector:0.1.11 + name: linkerd-collector + enabled: true + jaeger: + image: jaegertracing/all-in-one:1.17.1 + name: linkerd-jaeger +--- +### +### Grafana ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line 
+--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -1832,46 +1789,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -1997,101 +1948,190 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: - - configMap: - name: linkerd-config - name: config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: 
linkerd-grafana - tracing: - collector: - image: omnition/opencensus-collector:0.1.11 - name: linkerd-collector - enabled: true - jaeger: - image: jaegertracing/all-in-one:1.17.1 - name: linkerd-jaeger + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity --- ### -### Tracing Collector Service +### Prometheus ### --- -apiVersion: v1 kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + 
kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2099,61 +2139,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - 
prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2183,6 +2229,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2279,41 +2327,71 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 
55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -2321,20 +2399,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -2345,22 +2425,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2486,95 +2579,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - 
apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -2582,61 +2621,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2666,8 +2690,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2764,20 +2786,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: 
provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index d7eb1b3bbfd9d..e8a3661fc056f 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1796,108 +1755,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector 
linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1905,39 +1783,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2064,46 +1945,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + 
app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2111,276 +2024,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- 
-kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: @@ -2771,6 +2443,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: collector: image: overwrite-collector-image @@ -2783,85 +2468,512 @@ data: resources: null --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: overwrite-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Grafana ### --- -kind: ServiceAccount +kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd ---- + annotations: + linkerd.io/created-by: linkerd/cli 
dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + 
path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- ### -### Tracing Collector Service +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- kind: ConfigMap +apiVersion: v1 metadata: - name: overwrite-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: 
(.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: overwrite-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2869,61 +2981,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: overwrite-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: 
overwrite-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: overwrite-collector-image + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: overwrite-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2953,6 +3071,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3026,6 +3146,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3047,41 +3169,97 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### linkerd-collector RBAC ### --- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: overwrite-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + 
labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: overwrite-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: overwrite-collector --- apiVersion: apps/v1 kind: Deployment @@ -3089,20 +3267,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: overwrite-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3113,22 +3293,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: overwrite-collector-image imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: overwrite-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3231,8 +3424,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - 
"443" image: gcr.io/linkerd-io/proxy-init:v1.3.2 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3254,108 +3445,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3363,61 +3487,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3543,20 +3652,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index e5b0614da2b6c..3f3b15d131382 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1796,108 +1755,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = 
false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1905,39 +1783,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2064,46 +1945,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - 
key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2111,276 +2024,35 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 
0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - 
name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator - ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9997 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: @@ -2771,6 +2443,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2781,85 +2466,512 @@ data: name: linkerd-jaeger --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### linkerd-jaeger RBAC +### Grafana ### --- -kind: ServiceAccount +kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana-config namespace: linkerd labels: - 
linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd ---- + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: 
datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- ### -### Tracing Collector Service +### Prometheus RBAC ### --- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2867,61 +2979,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: 
linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2951,6 +3069,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3047,41 +3167,97 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### 
+### Tracing Collector Service ### --- apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s +--- +apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3089,20 +3265,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3113,22 +3291,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3254,108 +3445,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + 
items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3363,61 +3487,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: 
GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3543,20 +3652,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 21059d8df55d7..7cc045372974b 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,108 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: 
true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1904,39 +1782,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2063,46 +1944,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: 
linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2110,40 +2023,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2272,78 +2183,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### 
Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2351,41 +2245,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2511,61 +2410,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + 
storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2573,46 +2581,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + 
image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2738,136 +2740,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - 
disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2875,20 +2972,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2896,39 +2993,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + 
runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3057,20 +3160,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 17d7fe36ff772..8310279f9da8b 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1781,108 +1740,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - 
linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1890,39 +1768,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2049,46 +1930,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector 
+### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2096,40 +2009,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2258,78 +2169,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - 
linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2337,41 +2231,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - 
name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2724,136 +2726,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: 
^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # 
__meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2861,20 +2958,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2882,39 +2979,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3043,20 +3146,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - 
serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index 9852cc7bd8be1..b225a4fd39c24 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1791,183 +1750,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - 
linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1975,45 +1778,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2044,8 +1844,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2142,39 +1940,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - 
app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2182,40 +2019,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2344,78 +2179,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + 
linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2423,41 +2241,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2583,61 +2406,271 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: false + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: 
linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous 
labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2645,46 +2678,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: 
/etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2714,6 +2747,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2810,35 +2845,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 0fab46f6feb8e..1af9d1a620a99 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: 
https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service 
-apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2855,6 +2442,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3157,3 +2757,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + 
scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them 
without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index 9852cc7bd8be1..b225a4fd39c24 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1791,183 +1750,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1975,45 +1778,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - 
linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2044,8 +1844,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2142,39 +1940,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: 
matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2182,40 +2019,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2344,78 +2179,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2423,41 +2241,46 @@ 
spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2583,61 +2406,271 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: false + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli 
dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them 
without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2645,46 +2678,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2714,6 +2747,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: 
/var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2810,35 +2845,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index eb7214b061261..fc76790fa3dbf 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: 
kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: 
linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2855,6 +2442,19 @@ data: enabled: true image: linkerd-image-overwrite name: linkerd-grafana-overwrite + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3157,3 +2757,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - 
/etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` 
prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index b4b2ba4332367..2ec9bafa97e34 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1923,108 +1882,30 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - 
linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: @@ -2032,46 +1913,69 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2204,46 +2108,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined + name: 
linkerd-sp-validator + namespace: linkerd labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: sp-validator + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator strategy: rollingUpdate: maxUnavailable: 1 @@ -2254,10 +2190,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2270,7 +2206,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2279,30 +2215,30 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - proxy-injector + - sp-validator topologyKey: kubernetes.io/hostname containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 resources: limits: cpu: "1" @@ -2313,8 +2249,6 @@ spec: securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2449,78 +2383,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: 
proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap strategy: rollingUpdate: maxUnavailable: 1 @@ -2531,10 +2448,10 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux @@ -2547,7 +2464,7 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: failure-domain.beta.kubernetes.io/zone weight: 100 requiredDuringSchedulingIgnoredDuringExecution: @@ -2556,30 +2473,33 @@ spec: - key: linkerd.io/control-plane-component operator: In values: - - sp-validator + - tap topologyKey: kubernetes.io/hostname containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 resources: limits: cpu: "1" @@ -2593,6 +2513,8 @@ spec: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2724,64 +2646,184 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls 
+ --- ### -### Tap +### linkerd add-ons configuration +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + resources: + cpu: + limit: "1" + request: 100m + memory: + limit: 1024Mi + request: 50Mi + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: - replicas: 3 + replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap - 
strategy: - rollingUpdate: - maxUnavailable: 1 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2789,73 +2831,47 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - tap - topologyKey: kubernetes.io/hostname containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 resources: limits: cpu: "1" - memory: "250Mi" + memory: "1024Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2987,143 +3003,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - resources: - cpu: - limit: "1" - request: 100m - memory: - limit: 1024Mi - request: 50Mi - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", 
"pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod 
+ relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -3131,20 +3235,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + 
linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -3152,46 +3256,52 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 resources: limits: - cpu: "1" - memory: "1024Mi" + cpu: "4" + memory: "8192Mi" requests: cpu: "300m" memory: "300Mi" securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3326,20 +3436,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_ha_config.golden b/cli/cmd/testdata/upgrade_ha_config.golden index 945b33b58f54b..b78df4951de09 100644 --- a/cli/cmd/testdata/upgrade_ha_config.golden +++ b/cli/cmd/testdata/upgrade_ha_config.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -838,3 +797,44 @@ metadata: labels: linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 0fab46f6feb8e..1af9d1a620a99 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: 
prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2855,6 +2442,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3157,3 +2757,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + 
scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them 
without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.2 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 7dd5e59409a5c..d32e466d1f867 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1787,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - 
linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1896,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2053,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + 
linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2100,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2260,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: 
sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2339,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: 
http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2722,136 +2724,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + 
path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + 
action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus 
linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2859,20 +2956,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2880,39 +2977,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3039,20 +3142,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 7c7c1340acb57..829dfbb17568e 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ 
b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1773,108 +1732,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1882,39 +1760,42 @@ spec: linkerd.io/identity-mode: default 
linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2039,46 +1920,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + 
linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2086,40 +1999,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2246,78 +2157,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2325,41 +2219,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: 
UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2483,61 +2382,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + 
editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2545,46 +2553,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2708,136 +2710,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: 
linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: 
['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - 
name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2845,20 +2942,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2866,39 +2963,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3025,20 +3128,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 7dd5e59409a5c..d32e466d1f867 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: 
["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1787,108 +1746,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1896,39 +1774,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: 
GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2053,46 +1934,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2100,40 +2013,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: 
linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2260,78 +2171,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2339,41 +2233,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - 
-controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2497,61 +2396,170 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: 
linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: @@ -2559,46 +2567,40 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2722,136 +2724,231 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - 
annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - 
type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2859,20 +2956,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2880,39 +2977,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3039,20 +3142,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity From d801768b02e443adc2505c82fc5cedcb11811cc4 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 12 May 2020 01:24:39 +0530 Subject: [PATCH 14/42] remove grafana.enabled check in prometheus Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 2 - cli/cmd/testdata/install_helm_output.golden | 862 ++++++----- .../install_helm_output_addons.golden | 1376 +++++++++-------- .../testdata/install_helm_output_ha.golden | 959 +++++++----- .../testdata/upgrade_grafana_disabled.yaml | 11 +- .../upgrade_grafana_enabled_disabled.yaml | 11 +- 6 files changed, 1767 insertions(+), 1454 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 2f8d3707def18..53a1dff22fdc3 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -29,7 +29,6 @@ data: static_configs: - targets: ['localhost:9090'] - {{ if 
.Values.grafana.enabled -}} - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -40,7 +39,6 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - {{- end}} # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 9eea634775977..6badd6e16b53f 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1848,151 +1805,74 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version 
labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2108,48 +1988,80 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + 
annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2158,40 +2070,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2309,80 +2219,63 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml +# Source: linkerd2/templates/tap.yaml --- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: 
apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2391,41 +2284,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2540,111 +2438,219 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls --- -# Source: linkerd2/templates/tap.yaml +# Source: linkerd2/templates/smi-metrics.yaml + +--- +# Source: linkerd2/templates/linkerd-config-addons.yaml --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + values: |- + grafana: + enabled: true + image: gcr.io/linkerd-io/grafana + name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml +--- +### +### Grafana RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + 
linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/grafana/templates/grafana.yaml +--- +### +### Grafana +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-grafana-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: http + port: 3000 + targetPort: 3000 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-grafana namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: - linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-grafana spec: nodeSelector: - beta.kubernetes.io/os: linux + null containers: - - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - 
containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 - name: admin-http + - containerPort: 3000 + name: http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /api/health + port: 3000 securityContext: - runAsUser: 2103 + runAsUser: 472 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2759,144 +2765,235 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls --- -# Source: linkerd2/templates/smi-metrics.yaml - ---- -# Source: linkerd2/templates/linkerd-config-addons.yaml +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml --- ### -### linkerd add-ons configuration +### Prometheus RBAC ### --- -kind: ConfigMap -apiVersion: v1 +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: linkerd-config-addons - namespace: linkerd + name: linkerd-linkerd-prometheus labels: + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - values: |- - grafana: - enabled: true - image: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false ---- -# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] --- -### -### Grafana RBAC -### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd --- -# Source: linkerd2/charts/grafana/templates/grafana.yaml +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml --- ### -### Grafana +### Prometheus ### --- kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-grafana-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - grafana.ini: |- - instance_name = linkerd-grafana + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s - [server] - root_url = 
%(protocol)s://%(domain)s:/grafana/ + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml - [auth] - disable_login_form = true + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - [auth.anonymous] - enabled = true - org_role = Editor + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ - [auth.basic] - enabled = false + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - [analytics] - check_for_updates = false + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - [panels] - disable_sanitize_html = true + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace 
+ target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- kind: Service apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus ports: - - name: http - port: 3000 - targetPort: 3000 + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2904,20 +3001,20 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2925,39 +3022,45 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: null containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/healthy + port: 9090 initialDelaySeconds: 30 - name: grafana + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 3000 - name: http + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: /api/health - port: 3000 + path: /-/ready + port: 9090 + 
initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 472 + runAsUser: 65534 volumeMounts: - mountPath: /data name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -3075,20 +3178,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-prometheus volumes: - emptyDir: {} name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index c6721064ae05f..581a54386ed71 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1849,151 +1806,74 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - 
name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2109,48 +1989,80 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: 
linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2159,258 +2071,25 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - 
linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: sp-validator - ports: - - name: sp-validator - port: 443 - targetPort: sp-validator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: sp-validator - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: sp-validator - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - sp-validator - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9997 - initialDelaySeconds: 10 - name: sp-validator + name: sp-validator ports: - containerPort: 8443 name: sp-validator @@ -2796,6 +2475,19 @@ data: enabled: true image: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2805,90 +2497,510 @@ data: image: jaegertracing/all-in-one:1.17.1 name: linkerd-jaeger --- -# Source: linkerd2/charts/tracing/templates/tracing-rbac.yaml +# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml --- ### -### linkerd-collector RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- +# Source: linkerd2/charts/grafana/templates/grafana.yaml +--- ### -### linkerd-jaeger RBAC +### Grafana ### --- -kind: ServiceAccount +kind: ConfigMap apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: 
http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line --- -# Source: linkerd2/charts/tracing/templates/tracing.yaml ---- -### -### Tracing Collector Service -### ---- +kind: Service apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + name: linkerd-grafana + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-grafana + spec: + nodeSelector: + null + containers: + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:linkerd-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana + ports: + - containerPort: 3000 + name: http + readinessProbe: + httpGet: + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - 443,111 + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-grafana + volumes: + - emptyDir: {} + name: data + - configMap: + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- kind: ConfigMap +apiVersion: v1 metadata: - name: linkerd-collector-config + name: linkerd-prometheus-config namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 
'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) --- -apiVersion: v1 kind: Service +apiVersion: v1 metadata: - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 selector: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 --- apiVersion: apps/v1 kind: Deployment @@ -2896,61 +3008,67 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: linkerd-collector + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-collector + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version - 
prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-collector + linkerd.io/proxy-deployment: linkerd-prometheus spec: + nodeSelector: + null containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: omnition/opencensus-collector:0.1.11 + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 9090 + name: admin-http readinessProbe: httpGet: - path: / - port: 13133 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 volumeMounts: - - mountPath: /conf - name: linkerd-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2980,6 +3098,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3065,41 +3185,101 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +# Source: linkerd2/charts/tracing/templates/tracing-rbac.yaml +--- ### -### Tracing Jaeger Service +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd +--- ### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/tracing/templates/tracing.yaml +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: linkerd-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + 
jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s --- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: linkerd-collector --- apiVersion: apps/v1 kind: Deployment @@ -3107,20 +3287,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: linkerd-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: linkerd-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3131,22 +3313,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: linkerd-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: linkerd-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: "80" + image: omnition/opencensus-collector:0.1.11 imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: linkerd-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3261,112 +3456,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/charts/grafana/templates/grafana-rbac.yaml ---- -### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -# Source: 
linkerd2/charts/grafana/templates/grafana.yaml ---- ### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3374,61 +3498,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - null containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - 
securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3543,20 +3652,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 80966c7ba8199..3d4938c972ed4 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -392,49 +392,6 @@ spec: description: The apex service of this split. JSONPath: .spec.service --- -# Source: linkerd2/templates/prometheus-rbac.yaml ---- -### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- # Source: linkerd2/templates/proxy-injector-rbac.yaml --- ### @@ -1976,158 +1933,104 @@ spec: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/grafana.yaml +# Source: linkerd2/templates/proxy-injector.yaml --- ### -### Grafana +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - 
options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: grafana - ports: - - name: http - port: 3000 - targetPort: 3000 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-grafana + name: linkerd-proxy-injector namespace: linkerd spec: - replicas: 1 + replicas: 3 selector: matchLabels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/control-plane-component: proxy-injector + strategy: + rollingUpdate: + maxUnavailable: 1 template: metadata: annotations: + linkerd.io/helm-release-version: "0" linkerd.io/created-by: linkerd/helm linkerd-version linkerd.io/identity-mode: default linkerd.io/proxy-version: test-proxy-version labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: linkerd.io/control-plane-component + operator: In + values: + - proxy-injector + topologyKey: kubernetes.io/hostname containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 3000 - name: http + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 + name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /api/health - port: 3000 + path: /ready + port: 9995 resources: limits: cpu: "1" - memory: "1024Mi" + memory: "250Mi" requests: cpu: "100m" memory: "50Mi" securityContext: - runAsUser: 472 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2249,288 +2152,51 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + serviceAccountName: linkerd-proxy-injector 
volumes: - - emptyDir: {} - name: data - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -# Source: linkerd2/templates/proxy-injector.yaml +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- +# Source: linkerd2/templates/sp-validator.yaml --- ### -### Proxy Injector +### Service Profile Validator ### --- -apiVersion: apps/v1 -kind: Deployment +kind: Service +apiVersion: v1 metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "50Mi" - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: 
"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - 
labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- -# Source: linkerd2/templates/sp-validator.yaml ---- -### -### Service Profile Validator -### ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-sp-validator - namespace: linkerd - labels: - linkerd.io/control-plane-component: sp-validator - linkerd.io/control-plane-ns: linkerd + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/helm linkerd-version spec: @@ -2545,8 +2211,8 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: linkerd-sp-validator - namespace: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version labels: app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd @@ -3051,6 +2717,26 @@ data: memory: limit: 1024Mi request: 50Mi + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + resources: + cpu: + limit: "4" + request: 300m + memory: + limit: 8192Mi + request: 300Mi tracing: enabled: false --- @@ -3212,8 +2898,8 @@ spec: cpu: "1" memory: "1024Mi" requests: - cpu: "300m" - memory: "300Mi" + cpu: "100m" + memory: "50Mi" securityContext: runAsUser: 472 volumeMounts: @@ -3251,8 +2937,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -3361,3 +3045,422 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +# Source: linkerd2/charts/prometheus/templates/prometheus-rbac.yaml +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +# Source: linkerd2/charts/prometheus/templates/prometheus.yaml +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + 
linkerd.io/created-by: linkerd/helm linkerd-version +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: 
labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: linkerd-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/helm linkerd-version + linkerd.io/identity-mode: default + linkerd.io/proxy-version: test-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + null + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: + limits: + cpu: "4" + memory: "8192Mi" + requests: + cpu: "300m" + memory: "300Mi" + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + test-trust-anchor + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: test.trust.domain + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:test-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + limits: + cpu: "1" + memory: "250Mi" + requests: + cpu: "100m" + memory: "20Mi" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,222 + - --outbound-ports-to-ignore + - 443,111 + image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index b225a4fd39c24..7770604440273 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2523,7 +2523,16 @@ data: static_configs: - targets: ['localhost:9090'] - + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: 
keep + regex: ^grafana$ # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index b225a4fd39c24..7770604440273 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2523,7 +2523,16 @@ data: static_configs: - targets: ['localhost:9090'] - + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' From bfd5b24dec2ae6c9b5520dce11f7d2097cc11e2b Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 12 May 2020 19:29:42 +0530 Subject: [PATCH 15/42] rename to prometheus test Signed-off-by: Tarun Pothulapati --- cli/cmd/install_addon_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cli/cmd/install_addon_test.go b/cli/cmd/install_addon_test.go index 1df4e56205db7..21d885022050e 100644 --- a/cli/cmd/install_addon_test.go +++ b/cli/cmd/install_addon_test.go @@ -21,20 +21,20 @@ func TestAddOnRender(t *testing.T) { withTracingAddonValues.Tracing["enabled"] = true addFakeTLSSecrets(withTracingAddonValues) - withGrafanaAddOnOverwrite, err := testInstallOptions() + withPrometheusAddOnOverwrite, err := testInstallOptions() if err != nil { t.Fatalf("Unexpected error: %v\n", err) } - withGrafanaAddOnOverwrite.addOnConfig = filepath.Join("testdata", "prom-config.yaml") - withGrafanaAddOnOverwriteValues, _, _ := withGrafanaAddOnOverwrite.validateAndBuild("", nil) - addFakeTLSSecrets(withGrafanaAddOnOverwriteValues) + withPrometheusAddOnOverwrite.addOnConfig = filepath.Join("testdata", "prom-config.yaml") + withPrometheusAddOnOverwriteValues, _, _ := withPrometheusAddOnOverwrite.validateAndBuild("", nil) + addFakeTLSSecrets(withPrometheusAddOnOverwriteValues) testCases := []struct { values *charts.Values goldenFileName string }{ {withTracingAddonValues, "install_tracing.golden"}, - {withGrafanaAddOnOverwriteValues, "install_prometheus_overwrite.golden"}, + {withPrometheusAddOnOverwriteValues, "install_prometheus_overwrite.golden"}, } for i, tc := range testCases { From ec245e13f924100d3fdbb28af0dbb66cf103a089 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 13 May 2020 15:31:18 +0530 Subject: [PATCH 16/42] full config with examples Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 8 ++- charts/linkerd2/values.yaml | 26 +++++++++ .../install_prometheus_overwrite.golden | 53 ++++++++++++------- cli/cmd/testdata/prom-config.yaml | 36 +++++++++---- 4 files changed, 93 insertions(+), 30 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 53a1dff22fdc3..07edb87ba3d0d 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -151,10 +151,16 @@ data: {{ toYaml .Values.scrapeConfigs | trim | nindent 4 }} {{ end }} - {{- if .Values.alertManagers }} + {{- if (or .Values.alertManagers .Values.alertRelabelConfigs) }} alerting: + alert_relabel_configs: + {{- if .Values.alertRelabelConfigs }} + {{- toYaml .Values.alertRelabelConfigs | trim | nindent 6 }} + {{- end }} alertmanagers: + {{- if 
.Values.alertManagers }} {{- toYaml .Values.alertManagers | trim | nindent 6 }} + {{- end }} {{- end }} {{- if .Values.remoteWrite }} diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 2d8f083b2030e..1424c3dbefce4 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -194,6 +194,32 @@ prometheus: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s + # scrapeConfigs: + # - job_name: 'kubernetes-nodes' + # scheme: https + # tls_config: + # ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # kubernetes_sd_configs: + # - role: node + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # alertManagers: + # - scheme: http + # static_configs: + # - targets: + # - "alertmanager.linkerd.svc:9093" + # alertRelabelConfigs: + # - action: labeldrop + # regex: prometheus_replica + # ruleConfigMapMounts: + # - name: alerting-rules + # subPath: alerting_rules.yml + # configMap: linkerd-prometheus-rules + # - name: recording-rules + # subPath: recording_rules.yml + # configMap: linkerd-prometheus-rules tracing: enabled: false diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index ef733bbedd1c9..0a42ad56b0df5 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2434,8 +2434,12 @@ data: static_configs: - targets: - alertmanager.linkerd.svc:9093 + alertRelabelConfigs: + - action: labeldrop + regex: prometheus_replica args: config.file: /etc/prometheus/prometheus.yml + log.format: json log.level: info storage.tsdb.path: /data storage.tsdb.retention.time: 6h @@ -2450,16 +2454,24 @@ data: name: linkerd-prometheus remoteWrite: - url: http://cortex-service.default:9009/api/prom/push + ruleConfigMapMounts: + - configMap: linkerd-prometheus-rules + name: alerting-rules + subPath: alerting_rules.yml + - configMap: linkerd-prometheus-rules + name: recording-rules + subPath: recording_rules.yml scrapeConfigs: - - azure_sd_configs: - - authentication_method: OAuth - client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret - environment: AzurePublicCloud - port: 9100 - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 - tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 - job_name: service-azure + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt tracing: enabled: false --- @@ -2952,17 +2964,21 @@ data: - action: labelmap regex: __tmp_pod_label_(.+) - - azure_sd_configs: - - authentication_method: OAuth - client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret - environment: AzurePublicCloud - port: 9100 - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 - tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 - job_name: service-azure + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt alerting: + alert_relabel_configs: + - 
action: labeldrop + regex: prometheus_replica alertmanagers: - scheme: http static_configs: @@ -3027,6 +3043,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml + - --log.format=json - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h diff --git a/cli/cmd/testdata/prom-config.yaml b/cli/cmd/testdata/prom-config.yaml index 0c6c7df116327..521d821656be2 100644 --- a/cli/cmd/testdata/prom-config.yaml +++ b/cli/cmd/testdata/prom-config.yaml @@ -1,25 +1,39 @@ +global: + prometheusUrl: http://cortex-service.default:9009/api/prom + prometheus: + args: + log.format: json globalConfig: evaluation_interval: 2m external_labels: cluster: cluster-1 scrapeConfigs: - - job_name: service-azure - azure_sd_configs: - - environment: AzurePublicCloud - authentication_method: OAuth - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 - tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 - client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret - port: 9100 - + - job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) alertManagers: - scheme: http static_configs: - targets: - "alertmanager.linkerd.svc:9093" - + alertRelabelConfigs: + - action: labeldrop + regex: prometheus_replica + ruleConfigMapMounts: + - name: alerting-rules + subPath: alerting_rules.yml + configMap: linkerd-prometheus-rules + - name: recording-rules + subPath: recording_rules.yml + configMap: linkerd-prometheus-rules remoteWrite: - url: http://cortex-service.default:9009/api/prom/push From e3d58c6f4a540591ef802a6c1eb9b6a92552de9e Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 13 May 2020 16:30:16 +0530 Subject: [PATCH 17/42] add args documentation in charts/README.md Signed-off-by: Tarun Pothulapati --- charts/linkerd2/README.md | 28 +++++++++++++++++++++++----- charts/linkerd2/values.yaml | 1 + 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md index 07aa4df91e8c1..32ce7d5686820 100644 --- a/charts/linkerd2/README.md +++ b/charts/linkerd2/README.md @@ -145,11 +145,6 @@ The following table lists the configurable parameters of the Linkerd2 chart and | `identity.issuer.tls.keyPEM` | Key for the issuer certificate (ECDSA). It must be provided during install. | | | `installNamespace` | Set to false when installing Linkerd in a custom namespace. See the [Linkerd documentation](https://linkerd.io/2/tasks/install-helm/#customizing-the-namespace) for more information. | `true` | | `omitWebhookSideEffects` | Omit the `sideEffects` flag in the webhook manifests | `false` | -| `prometheusAlertmanagers` | Alertmanager instances the Prometheus server sends alerts to configured via the static_configs parameter. | `[]` | -| `prometheusExtraArgs` | Extra command line options for Prometheus | `{}` | -| `prometheusImage` | Docker image for the Prometheus container | `prom/prometheus:v2.15.2` | -| `prometheusLogLevel` | Log level for Prometheus | `info` | -| `prometheusRuleConfigMapMounts` | Alerting/recording rule ConfigMap mounts (sub-path names must end in `_rules.yml` or `_rules.yaml`) | `[]` | | `proxyInjector.crtPEM` | Certificate for the proxy injector. If not provided then Helm will generate one. 
| | | `proxyInjector.keyPEM` | Certificate key for the proxy injector. If not provided then Helm will generate one. | | | `profileValidator.crtPEM` | Certificate for the service profile validator. If not provided then Helm will generate one. | | @@ -176,6 +171,29 @@ The following table lists the configurable parameters for the Grafana Add-On. | `grafana.resources.memory.limit` | Maximum amount of memory that grafana container can use || | `grafana.resources.memory.request` | Amount of memory that the grafana container requests || +### Prometheus Add-On + +The following table lists the configurable parameters for the Prometheus Add-On. + +| Parameter | Description | Default | +|:--------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------| +| `prometheus.alert_relabel_configs` | Alert relabeling is applied to alerts before they are sent to the Alertmanager. | `[]` | +| `prometheus.alertManagers` | Alertmanager instances the Prometheus server sends alerts to configured via the static_configs parameter. | `[]` | +| `prometheus.args` | Command line options for Prometheus binary | `storage.tsdb.path: /data, storage.tsdb.retention.time: 6h, config.file: /etc/prometheus/prometheus.yml, log.level: *controller_log_level` | +| `prometheus.enabled` | Flag to enable prometheus instance to be installed | `true` +| `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` | +| `prometheus.image` | Docker image for the grafana instance | `prom/prometheus:v2.15.2` | +| `prometheus.name` | Name of the prometheus instance Service | `linkerd-prometheus` | +| `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use || +| `prometheus.resources.cpu.request` | Amount of CPU units that the prometheus container requests || +| `prometheus.resources.memory.limit` | Maximum amount of memory that prometheus container can use || +| `prometheus.resources.memory.request` | Amount of memory that the prometheus container requests || +| `prometheus.ruleConfigMapMounts` | Alerting/recording rule ConfigMap mounts (sub-path names must end in `_rules.yml` or `_rules.yaml`) | `[]` | +| `prometheus.scrapeConfigs` | A scrape_config section specifies a set of targets and parameters describing how to scrape them. | `[]` | + +Most of the above configuration match directly with the official Prometheus +configuration which can be found [here](https://prometheus.io/docs/prometheus/latest/configuration/configuration) + ### Tracing Add-On The following table lists the configurable parameters for the Tracing Add-On. 
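
For illustration, a minimal Helm values override for the Prometheus add-on might look like the sketch below. The keys mirror the parameters documented in the table above and the testdata/prom-config.yaml used by the add-on tests; the file name and the concrete values are only examples, not defaults shipped with the chart.

# prom-overrides.yaml (hypothetical example)
prometheus:
  enabled: true
  globalConfig:
    # rendered into the global: section of the generated prometheus.yml
    scrape_interval: 30s
    evaluation_interval: 30s
  args:
    # each key/value pair is rendered as a --key=value container argument
    log.level: debug
  alertManagers:
    - scheme: http
      static_configs:
        - targets:
          - "alertmanager.linkerd.svc:9093"
  ruleConfigMapMounts:
    # sub-path names must end in _rules.yml or _rules.yaml
    - name: alerting-rules
      subPath: alerting_rules.yml
      configMap: linkerd-prometheus-rules

With Helm this would be applied via -f prom-overrides.yaml; the CLI path exercised in install_addon_test.go reads the same structure through its add-on config file (the addOnConfig option).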
diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 1424c3dbefce4..ad23b6f333e96 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -194,6 +194,7 @@ prometheus: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s + # resources: # scrapeConfigs: # - job_name: 'kubernetes-nodes' # scheme: https From e5dbbd44181c367bded4c5b149440e5f2a58490b Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 13 May 2020 16:31:18 +0530 Subject: [PATCH 18/42] fix name in docs Signed-off-by: Tarun Pothulapati --- charts/linkerd2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md index 32ce7d5686820..a4bf7dbbf056c 100644 --- a/charts/linkerd2/README.md +++ b/charts/linkerd2/README.md @@ -182,7 +182,7 @@ The following table lists the configurable parameters for the Prometheus Add-On. | `prometheus.args` | Command line options for Prometheus binary | `storage.tsdb.path: /data, storage.tsdb.retention.time: 6h, config.file: /etc/prometheus/prometheus.yml, log.level: *controller_log_level` | | `prometheus.enabled` | Flag to enable prometheus instance to be installed | `true` | `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` | -| `prometheus.image` | Docker image for the grafana instance | `prom/prometheus:v2.15.2` | +| `prometheus.image` | Docker image for the prometheus instance | `prom/prometheus:v2.15.2` | | `prometheus.name` | Name of the prometheus instance Service | `linkerd-prometheus` | | `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use || | `prometheus.resources.cpu.request` | Amount of CPU units that the prometheus container requests || From c8ba28310e64ed370a1b815d0d2a3a3243ed1e27 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 13 May 2020 17:09:03 +0530 Subject: [PATCH 19/42] fix ruleConfigMapMounts Signed-off-by: Tarun Pothulapati --- .../add-ons/prometheus/templates/prometheus.yaml | 4 ++-- .../testdata/install_prometheus_overwrite.golden | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 07edb87ba3d0d..3f59848604f3a 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -254,7 +254,7 @@ spec: securityContext: runAsUser: 65534 volumeMounts: - {{- range .Values.ruleCofigMapMounts }} + {{- range .Values.ruleConfigMapMounts }} - name: {{ .name }} mountPath: /etc/prometheus/{{ .subPath }} subPath: {{ .subPath }} @@ -273,7 +273,7 @@ spec: {{ end -}} serviceAccountName: {{.Values.name}} volumes: - {{- range .Values.ruleCofigMapMounts }} + {{- range .Values.ruleConfigMapMounts }} - name: {{ .name }} configMap: name: {{ .configMap }} diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 0a42ad56b0df5..fa00a8b4cae0b 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -3068,6 +3068,14 @@ spec: securityContext: runAsUser: 65534 volumeMounts: + - name: alerting-rules + mountPath: /etc/prometheus/alerting_rules.yml + subPath: alerting_rules.yml + readOnly: true + - name: recording-rules + mountPath: 
/etc/prometheus/recording_rules.yml + subPath: recording_rules.yml + readOnly: true - mountPath: /data name: data - mountPath: /etc/prometheus/prometheus.yml @@ -3201,6 +3209,12 @@ spec: terminationMessagePolicy: FallbackToLogsOnError serviceAccountName: linkerd-prometheus volumes: + - name: alerting-rules + configMap: + name: linkerd-prometheus-rules + - name: recording-rules + configMap: + name: linkerd-prometheus-rules - emptyDir: {} name: data - configMap: From 49be09b949a69d144417b6b8f4a0dbccb4e5cf80 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 15 Jun 2020 12:39:21 +0530 Subject: [PATCH 20/42] update go tests golden files Signed-off-by: Tarun Pothulapati --- .../install_addon_control-plane.golden | 196 +-- cli/cmd/testdata/install_control-plane.golden | 45 +- ...install_controlplane_tracing_output.golden | 219 +-- .../testdata/install_custom_registry.golden | 205 +-- cli/cmd/testdata/install_default.golden | 205 +-- .../testdata/install_grafana_existing.golden | 748 ++++----- cli/cmd/testdata/install_ha_output.golden | 246 +-- .../install_ha_with_overrides_output.golden | 243 +-- .../install_heartbeat_disabled_output.golden | 205 +-- .../install_prometheus_overwrite.golden | 35 +- cli/cmd/testdata/install_proxy_ignores.golden | 205 +-- .../install_restricted_dashboard.golden | 205 +-- cli/cmd/testdata/install_tracing.golden | 208 +-- .../testdata/install_tracing_overwrite.golden | 1367 +++++++++-------- .../upgrade_add-on_controlplane.golden | 201 +-- .../testdata/upgrade_add-on_overwrite.golden | 220 +-- cli/cmd/testdata/upgrade_add_add-on.golden | 210 +-- cli/cmd/testdata/upgrade_default.golden | 210 +-- .../testdata/upgrade_external_issuer.golden | 207 +-- .../upgrade_grafana_addon_overwrite.yaml | 881 +++++------ .../testdata/upgrade_grafana_disabled.yaml | 28 +- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 207 +-- .../upgrade_grafana_enabled_disabled.yaml | 31 +- .../testdata/upgrade_grafana_overwrite.yaml | 207 +-- cli/cmd/testdata/upgrade_ha.golden | 245 +-- .../upgrade_keep_webhook_cabundle.golden | 881 +++++------ cli/cmd/testdata/upgrade_nothing_addon.yaml | 207 +-- .../testdata/upgrade_overwrite_issuer.golden | 205 +-- ...write_trust_anchors-external-issuer.golden | 205 +-- .../upgrade_overwrite_trust_anchors.golden | 205 +-- .../upgrade_two_level_webhook_cert.golden | 881 +++++------ 31 files changed, 2474 insertions(+), 7089 deletions(-) diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 4c1e2229cdc18..52e917a97c988 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1163,200 +1163,6 @@ spec: --- kind: Service apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - 
- -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - 
allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: linkerd @@ -2485,7 +2291,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index 7170ed317450f..f124883fc3d4c 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1638,9 +1638,12 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + global: + grafanaUrl: "" grafana: enabled: true - image: gcr.io/linkerd-io/grafana + image: + name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: args: @@ -1930,40 +1933,18 @@ spec: - emptyDir: {} name: data - configMap: - name: linkerd-config - name: config + items: + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "" - grafana: - enabled: true - image: - name: gcr.io/linkerd-io/grafana - name: linkerd-grafana - tracing: - enabled: false --- ### ### Prometheus diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 1c30e7c0ce96c..f0339893a4150 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -1811,223 +1811,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: 
proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH - value: /var/run/linkerd/podinfo/labels - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_ADDR - value: linkerd-collector.linkerd.svc.cluster.local:55678 - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_NAME - value: linkerd-collector.linkerd.serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: var/run/linkerd/podinfo - name: podinfo - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - 
runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.labels - path: "labels" - name: podinfo - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3466,7 +3249,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 6b197553f2140..33122a06af0ed 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -1751,209 +1751,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: my.custom.registry/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 
-kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3327,7 +3124,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: my.custom.registry/linkerd-io/proxy-init:v1.3.2 + image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 5a0e3cf46c8ba..dcaa709f20788 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -1751,209 +1751,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3327,7 +3124,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 698a37c4d3f2f..2d7f7d3331d7a 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1784,183 +1743,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - 
target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1968,45 +1771,42 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: 
install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 + runAsUser: 2103 volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml + - mountPath: /var/run/linkerd/config + name: config + - mountPath: /var/run/linkerd/tls + name: tls readOnly: true - env: - name: LINKERD2_PROXY_LOG @@ -2037,8 +1837,6 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2133,39 +1931,78 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - - emptyDir: {} - name: data - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-proxy-injector + namespace: linkerd + labels: + linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: proxy-injector + ports: + - name: proxy-injector + port: 443 + targetPort: proxy-injector +--- ### -### Proxy Injector +### Service Profile Validator ### --- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-sp-validator + namespace: linkerd + labels: + linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: sp-validator + ports: + - name: sp-validator + port: 443 + targetPort: sp-validator +--- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: proxy-injector + app.kubernetes.io/name: sp-validator app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector + 
linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector + name: linkerd-sp-validator namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator template: metadata: annotations: @@ -2173,40 +2010,38 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: proxy-injector + linkerd.io/control-plane-component: sp-validator linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector + linkerd.io/proxy-deployment: linkerd-sp-validator spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - proxy-injector + - sp-validator - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9995 + port: 9997 initialDelaySeconds: 10 - name: proxy-injector + name: sp-validator ports: - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 + name: sp-validator + - containerPort: 9997 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9995 + port: 9997 securityContext: runAsUser: 2103 volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - mountPath: /var/run/linkerd/tls name: tls readOnly: true @@ -2333,78 +2168,61 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - - configMap: - name: linkerd-config - name: config - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: medium: Memory name: linkerd-identity-end-entity --- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: proxy-injector - ports: - - name: proxy-injector - port: 443 - targetPort: proxy-injector ---- ### -### Service Profile Validator +### Tap ### --- kind: Service apiVersion: v1 metadata: - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap ports: - - name: sp-validator + - name: grpc + port: 8088 + targetPort: 8088 + - name: apiserver port: 443 - targetPort: sp-validator + targetPort: apiserver --- -apiVersion: apps/v1 kind: Deployment +apiVersion: apps/v1 metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: sp-validator + app.kubernetes.io/name: tap app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd - name: linkerd-sp-validator + name: linkerd-tap namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: 
sp-validator + linkerd.io/control-plane-component: tap + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-tap template: metadata: annotations: @@ -2412,41 +2230,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: sp-validator + linkerd.io/control-plane-component: tap linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-sp-validator + linkerd.io/proxy-deployment: linkerd-tap spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - sp-validator + - tap + - -controller-namespace=linkerd - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /ping - port: 9997 + port: 9998 initialDelaySeconds: 10 - name: sp-validator + name: tap ports: - - containerPort: 8443 - name: sp-validator - - containerPort: 9997 + - containerPort: 8088 + name: grpc + - containerPort: 8089 + name: apiserver + - containerPort: 9998 name: admin-http readinessProbe: failureThreshold: 7 httpGet: path: /ready - port: 9997 + port: 9998 securityContext: runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/tls name: tls readOnly: true + - mountPath: /var/run/linkerd/config + name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2570,61 +2393,282 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: medium: Memory name: linkerd-identity-end-entity + - name: tls + secret: + secretName: linkerd-tap-tls + --- ### -### Tap +### linkerd add-ons configuration ### --- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-config-addons + namespace: linkerd + labels: + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + values: |- + global: + grafanaUrl: "somegrafana.xyz" + grafana: + enabled: false + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus + tracing: + enabled: false +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap 
+apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies 
of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- kind: Service apiVersion: v1 metadata: - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus ports: - - name: grpc - port: 8088 - targetPort: 8088 - - name: apiserver - port: 443 - targetPort: apiserver + - name: admin-http + port: 9090 + targetPort: 9090 --- -kind: Deployment apiVersion: apps/v1 +kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: tap + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - name: linkerd-tap + name: linkerd-prometheus namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus template: metadata: annotations: @@ -2632,46 +2676,46 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: tap + linkerd.io/control-plane-component: prometheus linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-tap + linkerd.io/proxy-deployment: linkerd-prometheus spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - tap - - -controller-namespace=linkerd - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /ping - port: 9998 - initialDelaySeconds: 10 - name: tap + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus ports: - - containerPort: 8088 - name: grpc - - containerPort: 8089 - name: apiserver - - containerPort: 9998 + - containerPort: 9090 name: admin-http readinessProbe: - failureThreshold: 7 httpGet: - path: /ready - port: 9998 + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 securityContext: - runAsUser: 2103 + runAsUser: 65534 volumeMounts: - - mountPath: /var/run/linkerd/tls - name: tls + - mountPath: /data + name: data + - mountPath: 
/etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml readOnly: true - - mountPath: /var/run/linkerd/config - name: config - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -2701,6 +2745,8 @@ spec: fieldPath: metadata.namespace - name: LINKERD2_PROXY_DESTINATION_CONTEXT value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" - name: LINKERD2_PROXY_IDENTITY_DIR value: /var/run/linkerd/identity/end-entity - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS @@ -2795,37 +2841,13 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: + - emptyDir: {} + name: data - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "somegrafana.xyz" - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 64e8571b2071f..e28c0f0802208 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -1934,250 +1934,6 @@ spec: topologyKey: kubernetes.io/hostname containers: - args: -<<<<<<< HEAD -======= - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- 
-### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: ->>>>>>> master - proxy-injector - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version @@ -3644,7 +3400,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index b9d8a6d896bb1..98d8c69955acf 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -1879,247 +1879,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 2 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - 
labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: 
linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "400m" - memory: "300Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3641,7 +3400,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 5aea5ca0bb250..a7711a13b31f0 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -1662,209 +1662,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - 
initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 
2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3238,7 +3035,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index fa00a8b4cae0b..c1043bca8717a 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -465,7 +465,7 @@ webhooks: name: linkerd-proxy-injector namespace: linkerd path: "/" - caBundle: cHJveHkgaW5qZWN0b3IgY3J0 + caBundle: cHJveHkgaW5qZWN0b3IgQ0EgYnVuZGxl failurePolicy: Ignore rules: - operations: [ "CREATE" ] @@ -528,8 +528,8 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined type: Opaque data: - crt.pem: cHJveHkgaW5qZWN0b3IgY3J0 - key.pem: cHJveHkgaW5qZWN0b3Iga2V5 + crt.pem: cHJvZmlsZSB2YWxpZGF0b3IgY3J0 + key.pem: cHJvZmlsZSB2YWxpZGF0b3Iga2V5 --- apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration @@ -551,7 +551,7 @@ webhooks: name: linkerd-sp-validator namespace: linkerd path: "/" - caBundle: cHJveHkgaW5qZWN0b3IgY3J0 + caBundle: cHJvZmlsZSB2YWxpZGF0b3IgQ0EgYnVuZGxl failurePolicy: Ignore rules: - operations: [ "CREATE" , "UPDATE" ] @@ -682,7 +682,7 @@ spec: service: name: linkerd-tap namespace: linkerd - caBundle: dGFwIGNydA== + caBundle: dGFwIENBIGJ1bmRsZQ== --- ### ### Control Plane PSP @@ -799,7 +799,7 @@ data: global: | {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN CERTIFICATE-----\nMIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy\nLmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE\nAxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0\nxtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364\n6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF\nBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE\nAiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv\nOLO4Zsk1XrGZHGsmyiEyvYF9lpY=\n-----END 
CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s","scheme":"linkerd.io/tls"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} proxy: | - {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.2","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"} + {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"} install: | {"cliVersion":"dev-undefined","flags":[]} --- @@ -1006,7 +1006,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -1228,7 +1228,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -1447,7 +1447,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -1715,7 +1715,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -1913,7 +1913,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -2150,7 +2150,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -2375,7 +2375,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -2424,9 +2424,12 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + global: + grafanaUrl: "" grafana: enabled: true - image: gcr.io/linkerd-io/grafana + image: + name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: alertManagers: @@ -2734,7 +2737,7 @@ spec: - 
4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -3186,7 +3189,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 8f9fffab02c4d..d45c383adc60e 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -1751,209 +1751,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,22,8100-8102 - - --outbound-ports-to-ignore - - 443,5432 - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: 
Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3327,7 +3124,7 @@ spec: - 4190,4191,22,8100-8102 - --outbound-ports-to-ignore - 443,5432 - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 3a31965aa4675..355bad8a46fe8 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -1686,209 +1686,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3262,7 +3059,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index d2ac764779af2..ffab557576d08 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -1805,212 +1805,6 @@ spec: path: /ready port: 9995 securityContext: -<<<<<<< HEAD -======= - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: 
- path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: ->>>>>>> master runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config @@ -3337,7 +3131,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 92ebd389f525c..5cd73708dd795 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: 
rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1788,192 +1747,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - 
- role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1981,242 +1775,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - 
--config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - 
image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2843,6 +2432,19 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: collector: image: overwrite-collector-image @@ -2853,85 +2455,100 @@ data: name: linkerd-jaeger --- 
### -### linkerd-collector RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: overwrite-collector - namespace: linkerd - labels: - linkerd.io/control-plane-component: overwrite-collector - linkerd.io/control-plane-ns: linkerd ---- -### -### linkerd-jaeger RBAC +### Grafana RBAC ### --- kind: ServiceAccount apiVersion: v1 metadata: - name: linkerd-jaeger + name: linkerd-grafana namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd --- ### -### Tracing Collector Service +### Grafana ### --- -apiVersion: v1 kind: ConfigMap -metadata: - name: overwrite-collector-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: overwrite-collector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - linkerd-collector-config: | - receivers: - opencensus: - port: 55678 - zipkin: - port: 9411 - queued-exporters: - jaeger-all-in-one: - num-workers: 4 - queue-size: 100 - retry-on-failure: true - sender-type: jaeger-thrift-http - jaeger-thrift-http: - collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces - timeout: 5s ---- apiVersion: v1 -kind: Service metadata: - name: overwrite-collector + name: linkerd-grafana-config namespace: linkerd labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - ports: - - name: opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - - name: zipkin - port: 9411 - protocol: TCP - targetPort: 9411 +data: + grafana.ini: |- + instance_name = linkerd-grafana + + [server] + root_url = %(protocol)s://%(domain)s:/grafana/ + + [auth] + disable_login_form = true + + [auth.anonymous] + enabled = true + org_role = Editor + + [auth.basic] + enabled = false + + [analytics] + check_for_updates = false + + [panels] + disable_sanitize_html = true + + datasources.yaml: |- + apiVersion: 1 + datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 + isDefault: true + jsonData: + timeInterval: "5s" + version: 1 + editable: true + + dashboards.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards + homeDashboardId: linkerd-top-line +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-grafana + namespace: linkerd + labels: + linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP selector: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana + ports: + - name: http + port: 3000 + targetPort: 3000 --- apiVersion: apps/v1 kind: Deployment @@ -2939,61 +2556,61 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: overwrite-collector + app.kubernetes.io/name: grafana app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - name: overwrite-collector + name: linkerd-grafana namespace: 
linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector - minReadySeconds: 5 - progressDeadlineSeconds: 120 + linkerd.io/proxy-deployment: linkerd-grafana template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version - prometheus.io/path: /metrics - prometheus.io/port: "8888" - prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-component: grafana linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: overwrite-collector + linkerd.io/proxy-deployment: linkerd-grafana spec: + nodeSelector: + beta.kubernetes.io/os: linux containers: - - command: - - /occollector_linux - - --config=/conf/linkerd-collector-config.yaml - env: - - name: GOGC - value: "80" - image: overwrite-collector-image + - env: + - name: GF_PATHS_DATA + value: /data + image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: / - port: 13133 - name: oc-collector + path: /api/health + port: 3000 + initialDelaySeconds: 30 + name: grafana ports: - - containerPort: 55678 - - containerPort: 9411 + - containerPort: 3000 + name: http readinessProbe: httpGet: - path: / - port: 13133 + path: /api/health + port: 3000 + securityContext: + runAsUser: 472 volumeMounts: - - mountPath: /conf - name: overwrite-collector-config-val + - mountPath: /data + name: data + - mountPath: /etc/grafana + name: grafana-config + readOnly: true - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3094,6 +2711,8 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 + - --outbound-ports-to-ignore + - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3115,41 +2734,515 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-grafana volumes: + - emptyDir: {} + name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Tracing Jaeger Service +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount 
+apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- ### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: 
__meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: 
LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: 
linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity +--- +### +### linkerd-collector RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: overwrite-collector + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd +--- +### +### linkerd-jaeger RBAC +### +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-jaeger + namespace: linkerd + labels: + linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-ns: linkerd +--- +### +### Tracing Collector Service +### +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: overwrite-collector-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: overwrite-collector + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + linkerd-collector-config: | + receivers: + opencensus: + port: 55678 + zipkin: + port: 9411 + queued-exporters: + jaeger-all-in-one: + num-workers: 4 + queue-size: 100 + retry-on-failure: true + sender-type: jaeger-thrift-http + jaeger-thrift-http: + collector-endpoint: http://linkerd-jaeger.linkerd:14268/api/traces + timeout: 5s --- apiVersion: v1 kind: Service metadata: - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP - selector: - linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: collection - port: 14268 - - name: ui - port: 16686 + - name: opencensus + port: 55678 + protocol: TCP + targetPort: 55678 + - name: zipkin + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + linkerd.io/control-plane-component: overwrite-collector --- apiVersion: apps/v1 kind: Deployment @@ -3157,20 +3250,22 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: linkerd-jaeger + app.kubernetes.io/name: overwrite-collector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - name: linkerd-jaeger + name: overwrite-collector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector + minReadySeconds: 5 + progressDeadlineSeconds: 120 template: metadata: annotations: @@ -3181,22 +3276,35 @@ spec: prometheus.io/port: "8888" prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: linkerd-jaeger + linkerd.io/control-plane-component: overwrite-collector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-jaeger + linkerd.io/proxy-deployment: overwrite-collector spec: containers: - - args: - - --query.base-path=/jaeger - image: jaegertracing/all-in-one:1.17.1 + - command: + - /occollector_linux + - --config=/conf/linkerd-collector-config.yaml + env: + - name: GOGC + value: 
"80" + image: overwrite-collector-image imagePullPolicy: IfNotPresent - name: jaeger + livenessProbe: + httpGet: + path: / + port: 13133 + name: oc-collector ports: - - containerPort: 14268 - name: collection - - containerPort: 16686 - name: ui + - containerPort: 55678 + - containerPort: 9411 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: overwrite-collector-config-val - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3297,8 +3405,6 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init @@ -3320,108 +3426,41 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: medium: Memory name: linkerd-identity-end-entity --- ### -### Grafana RBAC -### ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-grafana - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd ---- -### -### Grafana +### Tracing Jaeger Service ### --- -kind: ConfigMap apiVersion: v1 -metadata: - name: linkerd-grafana-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: grafana - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - grafana.ini: |- - instance_name = linkerd-grafana - - [server] - root_url = %(protocol)s://%(domain)s:/grafana/ - - [auth] - disable_login_form = true - - [auth.anonymous] - enabled = true - org_role = Editor - - [auth.basic] - enabled = false - - [analytics] - check_for_updates = false - - [panels] - disable_sanitize_html = true - - datasources.yaml: |- - apiVersion: 1 - datasources: - - name: prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - isDefault: true - jsonData: - timeInterval: "5s" - version: 1 - editable: true - - dashboards.yaml: |- - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards - homeDashboardId: linkerd-top-line ---- kind: Service -apiVersion: v1 metadata: - name: linkerd-grafana + name: linkerd-jaeger namespace: linkerd labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: type: ClusterIP selector: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger ports: - - name: http - port: 3000 - targetPort: 3000 + - name: collection + port: 14268 + - name: ui + port: 16686 --- apiVersion: apps/v1 kind: Deployment @@ -3429,61 +3468,46 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: grafana + app.kubernetes.io/name: linkerd-jaeger app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - name: 
linkerd-grafana + name: linkerd-jaeger namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger template: metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version + prometheus.io/path: /metrics + prometheus.io/port: "8888" + prometheus.io/scrape: "true" labels: - linkerd.io/control-plane-component: grafana + linkerd.io/control-plane-component: linkerd-jaeger linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-grafana + linkerd.io/proxy-deployment: linkerd-jaeger spec: - nodeSelector: - beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 30 - name: grafana + name: jaeger ports: - - containerPort: 3000 - name: http - readinessProbe: - httpGet: - path: /api/health - port: 3000 - securityContext: - runAsUser: 472 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/grafana - name: grafana-config - readOnly: true + - containerPort: 14268 + name: collection + - containerPort: 16686 + name: ui - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info @@ -3607,20 +3631,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 0a5ab54afdbb6..1f229a0a97090 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1182,205 +1182,6 @@ metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined spec: -<<<<<<< HEAD -======= - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - 
failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: 
linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-proxy-injector - volumes: - - configMap: - name: linkerd-config - name: config - - name: tls - secret: - secretName: linkerd-proxy-injector-tls - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-proxy-injector - namespace: linkerd - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: ->>>>>>> master type: ClusterIP selector: linkerd.io/control-plane-component: proxy-injector @@ -2129,7 +1930,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index f08c012da6e4e..d7944d451ecef 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -1813,214 +1813,6 @@ spec: path: /ready port: 9995 securityContext: -<<<<<<< HEAD -======= - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: ->>>>>>> master runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config @@ -2937,13 +2729,9 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 -<<<<<<< HEAD - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 -======= image: gcr.io/linkerd-io/proxy-init:v1.3.3 ->>>>>>> master imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -3363,7 +3151,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: @@ -3639,13 +3427,7 @@ spec: - "2102" - --inbound-ports-to-ignore - 4190,4191 -<<<<<<< HEAD - image: gcr.io/linkerd-io/proxy-init:v1.3.2 -======= - - --outbound-ports-to-ignore - - "443" image: gcr.io/linkerd-io/proxy-init:v1.3.3 ->>>>>>> master imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 23bea49bbf481..b64371b282590 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -1813,214 +1813,6 @@ spec: path: /ready port: 9995 securityContext: -<<<<<<< HEAD -======= - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - 
name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: ->>>>>>> master runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config @@ -3357,7 +3149,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index d5ebf2cbe4a7b..e29f4b228bb7b 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -1812,214 +1812,6 @@ spec: path: /ready port: 9995 securityContext: -<<<<<<< HEAD -======= - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: ->>>>>>> master runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config @@ -3350,7 +3142,7 @@ spec: - 4190,4191,2525-2527,2529 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 7f2edb22e1c1a..7bb375f1e5541 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -1745,211 +1745,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - 
mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: 
linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3333,7 +3128,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 6bb5331f98ecc..5d23f52329c84 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 
- - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: 
linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2858,6 +2445,19 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3160,3 +2760,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - /etc/prometheus/*_rules.yaml + + 
scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` prefixed labels and copy them 
without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: 
Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index 458c644479673..cb77f61bc3576 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2434,6 +2434,8 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + global: + grafanaUrl: "" grafana: enabled: false prometheus: @@ -2864,29 +2866,3 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "" - grafana: - enabled: false - tracing: - enabled: false diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 0b34a4e289459..5d23f52329c84 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -1759,211 +1759,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3347,7 +3142,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index f4953a5acb933..cb77f61bc3576 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2434,6 +2434,8 @@ metadata: linkerd.io/created-by: linkerd/cli dev-undefined data: values: |- + global: + grafanaUrl: "" grafana: enabled: false prometheus: @@ -2864,32 +2866,3 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity -<<<<<<< HEAD -======= - - name: tls - secret: - secretName: linkerd-tap-tls - ---- -### -### linkerd add-ons configuration -### ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-config-addons - namespace: linkerd - labels: - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - values: |- - global: - grafanaUrl: "" - grafana: - enabled: false - tracing: - enabled: false ->>>>>>> master diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 452c8501cb25c..f7819f9a5b965 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -1759,211 +1759,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: 
"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - 
memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3347,7 +3142,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 1379ffe919df4..6c9a5d3f9f1c0 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -1887,249 +1887,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - 
value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 
4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3661,7 +3418,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index d608203044b2e..37a08f48a4f81 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli 
dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2858,6 +2445,19 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3160,3 +2760,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - 
/etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` 
prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - 
emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 0b34a4e289459..5d23f52329c84 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -1759,211 +1759,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3347,7 +3142,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index a331dcaa4a0d2..96fd6e748739b 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -1751,209 +1751,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3327,7 +3124,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index ab317dca28024..761d5d3434820 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -1737,209 +1737,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3313,7 +3110,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index a331dcaa4a0d2..96fd6e748739b 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -1751,209 +1751,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3327,7 +3124,7 @@ spec: - 4190,4191 - --outbound-ports-to-ignore - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.2 + image: gcr.io/linkerd-io/proxy-init:v1.3.3 imagePullPolicy: IfNotPresent name: linkerd-init resources: diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index d0817cd7e5647..3a056af2cdc8a 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -377,47 +377,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -1795,192 +1754,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - 
linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1988,244 +1782,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: UPGRADE-PROXY-VERSION labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level=info + image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2858,6 +2445,19 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 + name: linkerd-prometheus tracing: enabled: false --- @@ -3160,3 +2760,416 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - 
/etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` 
prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: UPGRADE-PROXY-VERSION + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw + JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 + MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r + ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg + Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ + Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB + /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe + aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC + IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R + SxZLbJKt6SJIIY9dw5gzQpUQR2U= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191,2525-2527,2529 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - 
emptyDir: + medium: Memory + name: linkerd-identity-end-entity From 347354cd08eaaa5098d73f6c88bc4cb2082e1cdf Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 15 Jun 2020 17:04:42 +0530 Subject: [PATCH 21/42] remove unfixed changes Signed-off-by: Tarun Pothulapati --- pkg/charts/linkerd2/values_test.go | 33 ++--- pkg/healthcheck/healthcheck_test.go | 137 ++++++++++++++++++- test/serviceaccounts/serviceaccounts_test.go | 1 + 3 files changed, 150 insertions(+), 21 deletions(-) diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index a25d0647299a4..78627e618b3a8 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -14,25 +14,20 @@ func TestNewValues(t *testing.T) { testVersion := "linkerd-dev" expected := &Values{ - Stage: "", - ControllerImage: "gcr.io/linkerd-io/controller", - ControllerImageVersion: testVersion, - WebImage: "gcr.io/linkerd-io/web", - ControllerReplicas: 1, - ControllerLogLevel: "info", - PrometheusLogLevel: "info", - PrometheusExtraArgs: map[string]string{}, - PrometheusAlertmanagers: []interface{}{}, - PrometheusRuleConfigMapMounts: []PrometheusRuleConfigMapMount{}, - ControllerUID: 2103, - EnableH2Upgrade: true, - EnablePodAntiAffinity: false, - WebhookFailurePolicy: "Ignore", - OmitWebhookSideEffects: false, - RestrictDashboardPrivileges: false, - DisableHeartBeat: false, - HeartbeatSchedule: "0 0 * * *", - InstallNamespace: true, + Stage: "", + ControllerImage: "gcr.io/linkerd-io/controller", + WebImage: "gcr.io/linkerd-io/web", + ControllerReplicas: 1, + ControllerLogLevel: "info", + ControllerUID: 2103, + EnableH2Upgrade: true, + EnablePodAntiAffinity: false, + WebhookFailurePolicy: "Ignore", + OmitWebhookSideEffects: false, + RestrictDashboardPrivileges: false, + DisableHeartBeat: false, + HeartbeatSchedule: "0 0 * * *", + InstallNamespace: true, Prometheus: Prometheus{ "enabled": true, "name": "linkerd-prometheus", diff --git a/pkg/healthcheck/healthcheck_test.go b/pkg/healthcheck/healthcheck_test.go index 0792059aa35c5..684a7edcbe476 100644 --- a/pkg/healthcheck/healthcheck_test.go +++ b/pkg/healthcheck/healthcheck_test.go @@ -570,7 +570,7 @@ metadata: }, []string{ "linkerd-config control plane Namespace exists", - "linkerd-config control plane ClusterRoles exist: missing ClusterRoles: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", + "linkerd-config control plane ClusterRoles exist: missing ClusterRoles: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-prometheus, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", }, }, { @@ -599,6 +599,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -624,7 +632,7 @@ metadata: []string{ "linkerd-config control plane Namespace exists", "linkerd-config control plane ClusterRoles exist", - "linkerd-config control plane ClusterRoleBindings exist: missing ClusterRoleBindings: linkerd-test-ns-controller, linkerd-test-ns-identity, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", + "linkerd-config control plane ClusterRoleBindings exist: missing ClusterRoleBindings: linkerd-test-ns-controller, linkerd-test-ns-identity, 
linkerd-test-ns-prometheus, linkerd-test-ns-proxy-injector, linkerd-test-ns-sp-validator, linkerd-test-ns-tap", }, }, { @@ -653,6 +661,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -693,6 +709,14 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -744,6 +768,15 @@ metadata: ` kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: test-ns + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ServiceAccount +apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -839,6 +872,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -879,6 +920,14 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -930,6 +979,15 @@ metadata: ` kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: test-ns + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ServiceAccount +apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1034,6 +1092,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1074,6 +1140,14 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1125,6 +1199,15 @@ metadata: ` kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: test-ns + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ServiceAccount +apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1238,6 +1321,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1278,6 +1369,14 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1329,6 +1428,15 @@ metadata: ` 
kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: test-ns + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ServiceAccount +apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns @@ -1451,6 +1559,14 @@ metadata: ` kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1491,6 +1607,14 @@ metadata: ` kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-test-ns-prometheus + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: linkerd-test-ns-proxy-injector labels: @@ -1542,6 +1666,15 @@ metadata: ` kind: ServiceAccount apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: test-ns + labels: + linkerd.io/control-plane-ns: test-ns +`, + ` +kind: ServiceAccount +apiVersion: v1 metadata: name: linkerd-proxy-injector namespace: test-ns diff --git a/test/serviceaccounts/serviceaccounts_test.go b/test/serviceaccounts/serviceaccounts_test.go index d965d0566033a..aa198ff925d27 100644 --- a/test/serviceaccounts/serviceaccounts_test.go +++ b/test/serviceaccounts/serviceaccounts_test.go @@ -66,6 +66,7 @@ func TestServiceAccountsMatch(t *testing.T) { "error retrieving list of linkerd-psp rolebindings: %s", err) } saNamesPSP := strings.Split(res, " ") + if len(saNamesPSP) < len(expectedNames) || !namesMatch(saNamesPSP) { t.Fatalf( "The service accounts in the linkerd-psp rolebindings don't match the expected list: %s", From 79f7a784097d42543d64e2006d53c3ed1fd385c1 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 16 Jun 2020 15:04:41 +0530 Subject: [PATCH 22/42] remove .Prometheus.name variable Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus-rbac.yaml | 10 +++++----- charts/add-ons/prometheus/templates/prometheus.yaml | 12 ++++++------ charts/linkerd2/README.md | 1 - charts/linkerd2/values.yaml | 1 - cli/cmd/install_test.go | 1 - cli/cmd/testdata/install_addon_control-plane.golden | 1 - cli/cmd/testdata/install_control-plane.golden | 1 - .../install_controlplane_tracing_output.golden | 1 - cli/cmd/testdata/install_custom_registry.golden | 1 - cli/cmd/testdata/install_default.golden | 1 - cli/cmd/testdata/install_grafana_existing.golden | 1 - cli/cmd/testdata/install_ha_output.golden | 1 - .../testdata/install_ha_with_overrides_output.golden | 1 - .../install_heartbeat_disabled_output.golden | 1 - cli/cmd/testdata/install_helm_output.golden | 1 - cli/cmd/testdata/install_helm_output_addons.golden | 1 - cli/cmd/testdata/install_helm_output_ha.golden | 1 - cli/cmd/testdata/install_no_init_container.golden | 1 - cli/cmd/testdata/install_output.golden | 6 +++--- cli/cmd/testdata/install_prometheus_overwrite.golden | 1 - cli/cmd/testdata/install_proxy_ignores.golden | 1 - cli/cmd/testdata/install_restricted_dashboard.golden | 1 - cli/cmd/testdata/install_tracing.golden | 1 - cli/cmd/testdata/install_tracing_overwrite.golden | 1 - cli/cmd/testdata/upgrade_add-on_controlplane.golden | 1 - cli/cmd/testdata/upgrade_add-on_overwrite.golden | 1 - cli/cmd/testdata/upgrade_add_add-on.golden | 1 - cli/cmd/testdata/upgrade_default.golden | 1 - cli/cmd/testdata/upgrade_external_issuer.golden | 1 - .../testdata/upgrade_grafana_addon_overwrite.yaml | 1 - 
cli/cmd/testdata/upgrade_grafana_disabled.yaml | 1 - cli/cmd/testdata/upgrade_grafana_enabled.yaml | 1 - .../testdata/upgrade_grafana_enabled_disabled.yaml | 1 - cli/cmd/testdata/upgrade_grafana_overwrite.yaml | 1 - cli/cmd/testdata/upgrade_ha.golden | 1 - .../testdata/upgrade_keep_webhook_cabundle.golden | 1 - cli/cmd/testdata/upgrade_nothing_addon.yaml | 1 - cli/cmd/testdata/upgrade_overwrite_issuer.golden | 1 - ...de_overwrite_trust_anchors-external-issuer.golden | 1 - .../testdata/upgrade_overwrite_trust_anchors.golden | 1 - .../testdata/upgrade_two_level_webhook_cert.golden | 1 - pkg/charts/linkerd2/values_test.go | 2 -- 42 files changed, 14 insertions(+), 54 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus-rbac.yaml b/charts/add-ons/prometheus/templates/prometheus-rbac.yaml index d3dd458ca0db4..9fa4c0d62dbeb 100644 --- a/charts/add-ons/prometheus/templates/prometheus-rbac.yaml +++ b/charts/add-ons/prometheus/templates/prometheus-rbac.yaml @@ -6,7 +6,7 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: {{.Values.global.namespace}}-{{.Values.name}} + name: linkerd-{{.Values.global.namespace}}-prometheus labels: {{.Values.global.controllerComponentLabel}}: prometheus {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}} @@ -18,23 +18,23 @@ rules: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: {{.Values.global.namespace}}-{{.Values.name}} + name: linkerd-{{.Values.global.namespace}}-prometheus labels: {{.Values.global.controllerComponentLabel}}: prometheus {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: {{.Values.global.namespace}}-{{.Values.name}} + name: linkerd-{{.Values.global.namespace}}-prometheus subjects: - kind: ServiceAccount - name: {{.Values.name}} + name: linkerd-prometheus namespace: {{.Values.global.namespace}} --- kind: ServiceAccount apiVersion: v1 metadata: - name: {{.Values.name}} + name: linkerd-prometheus namespace: {{.Values.global.namespace}} labels: {{.Values.global.controllerComponentLabel}}: prometheus diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index a7fe439ffe7a9..c920f79cbe7ee 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -6,7 +6,7 @@ kind: ConfigMap apiVersion: v1 metadata: - name: {{.Values.name}}-config + name: linkerd-prometheus-config namespace: {{.Values.global.namespace}} labels: {{.Values.global.controllerComponentLabel}}: prometheus @@ -171,7 +171,7 @@ data: kind: Service apiVersion: v1 metadata: - name: {{.Values.name}} + name: linkerd-prometheus namespace: {{.Values.global.namespace}} labels: {{.Values.global.controllerComponentLabel}}: prometheus @@ -188,7 +188,7 @@ spec: targetPort: 9090 --- {{ $_ := set .Values.global.proxy "workloadKind" "deployment" -}} -{{ $_ := set .Values.global.proxy "component" .Values.name -}} +{{ $_ := set .Values.global.proxy "component" "linkerd-prometheus" -}} {{ include "linkerd.proxy.validation" .Values.global.proxy -}} apiVersion: apps/v1 kind: Deployment @@ -201,7 +201,7 @@ metadata: app.kubernetes.io/version: {{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} {{.Values.global.controllerComponentLabel}}: prometheus {{.Values.global.controllerNamespaceLabel}}: {{.Values.global.namespace}} - name: {{.Values.name}} + name: 
linkerd-prometheus namespace: {{.Values.global.namespace}} spec: replicas: 1 @@ -268,7 +268,7 @@ spec: initContainers: - {{- include "partials.proxy-init" . | indent 8 | trimPrefix (repeat 7 " ") }} {{ end -}} - serviceAccountName: {{.Values.name}} + serviceAccountName: linkerd-prometheus volumes: {{- range .Values.ruleConfigMapMounts }} - name: {{ .name }} @@ -278,7 +278,7 @@ spec: - emptyDir: {} name: data - configMap: - name: {{.Values.name}}-config + name: linkerd-prometheus-config name: prometheus-config {{ if .Values.global.controlPlaneTracing -}} - {{- include "partials.proxy.volumes.labels" . | indent 8 | trimPrefix (repeat 7 " ") }} diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md index cd0409e3f740a..ba812b9abe545 100644 --- a/charts/linkerd2/README.md +++ b/charts/linkerd2/README.md @@ -197,7 +197,6 @@ The following table lists the configurable parameters for the Prometheus Add-On. | `prometheus.enabled` | Flag to enable prometheus instance to be installed | `true` | `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` | | `prometheus.image` | Docker image for the prometheus instance | `prom/prometheus:v2.15.2` | -| `prometheus.name` | Name of the prometheus instance Service | `linkerd-prometheus` | | `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use || | `prometheus.resources.cpu.request` | Amount of CPU units that the prometheus container requests || | `prometheus.resources.memory.limit` | Maximum amount of memory that prometheus container can use || diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 41bb92590770f..73ecff6521716 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -204,7 +204,6 @@ grafana: prometheus: enabled: true - name: linkerd-prometheus image: prom/prometheus:v2.15.2 args: storage.tsdb.path: /data diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index c11786206849c..91f607b25f445 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -131,7 +131,6 @@ func TestRender(t *testing.T) { Prometheus: charts.Prometheus{ "enabled": true, "image": "PrometheusImage", - "name": "linkerd-prometheus", }, Tracing: map[string]interface{}{ "enabled": false, diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 52e917a97c988..aff6853de4980 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1658,7 +1658,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index f124883fc3d4c..029be3025091a 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1657,7 +1657,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index f0339893a4150..ed14849bdc74a 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ 
b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -2546,7 +2546,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 33122a06af0ed..8a1a84a43cd35 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index dcaa709f20788..8f1f581ce08bb 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 2d7f7d3331d7a..75b2eda713108 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -2437,7 +2437,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index e28c0f0802208..1f3eca9e1b075 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -2686,7 +2686,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus resources: cpu: limit: "4" diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 98d8c69955acf..54f285e44051f 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -2686,7 +2686,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus resources: cpu: limit: "4" diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index a7711a13b31f0..311cb34d319ff 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2354,7 +2354,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 6e0f2cf9be67b..9ab34db07ce97 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2489,7 +2489,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index fe08f29f8f7cd..df06ad6fd0e83 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2490,7 +2490,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: 
image: omnition/opencensus-collector:0.1.11 diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index b071b7deab177..2bcb46295eb6e 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -2732,7 +2732,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus resources: cpu: limit: "4" diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 178a0d874dda0..412c14feb0993 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2209,7 +2209,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 857606bbfd208..05e373d97c33d 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2735,7 +2735,7 @@ spec: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: Namespace-linkerd-prometheus + name: linkerd-Namespace-prometheus labels: ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace @@ -2747,14 +2747,14 @@ rules: kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: Namespace-linkerd-prometheus + name: linkerd-Namespace-prometheus labels: ControllerComponentLabel: prometheus ControllerNamespaceLabel: Namespace roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: Namespace-linkerd-prometheus + name: linkerd-Namespace-prometheus subjects: - kind: ServiceAccount name: linkerd-prometheus diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index c1043bca8717a..6cde822cf3161 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2454,7 +2454,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus remoteWrite: - url: http://cortex-service.default:9009/api/prom/push ruleConfigMapMounts: diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index d45c383adc60e..19d9b015bd0b3 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 355bad8a46fe8..5d0293c670df5 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2378,7 +2378,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index ffab557576d08..f836d71f9c0bc 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2444,7 +2444,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: 
omnition/opencensus-collector:0.1.11 diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 5cd73708dd795..b87eadbd5e7ad 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2444,7 +2444,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: overwrite-collector-image diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 1f229a0a97090..f1c0cc5d5c143 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1672,7 +1672,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index d7944d451ecef..c4c6a0bdda682 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2458,7 +2458,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: overwrite-collector-image diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index b64371b282590..2a00560b3d9c5 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2458,7 +2458,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: collector: image: omnition/opencensus-collector:0.1.11 diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index e29f4b228bb7b..e0a4026fff390 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 7bb375f1e5541..8322b8c69f022 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 5d23f52329c84..997f34f10bdcb 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index cb77f61bc3576..7d16251229f4d 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2450,7 +2450,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml 
b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 5d23f52329c84..997f34f10bdcb 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index cb77f61bc3576..7d16251229f4d 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2450,7 +2450,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index f7819f9a5b965..17a1f1d9c8322 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 6c9a5d3f9f1c0..efff45601dd33 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -2700,7 +2700,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus resources: cpu: limit: "4" diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 37a08f48a4f81..2a467821011fc 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 5d23f52329c84..997f34f10bdcb 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 96fd6e748739b..99e2ef3c61f83 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 761d5d3434820..b49f8ffe2cf1a 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2429,7 +2429,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 96fd6e748739b..99e2ef3c61f83 100644 --- 
a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2443,7 +2443,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 3a056af2cdc8a..b19caa8a8b9bd 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -2457,7 +2457,6 @@ data: scrape_interval: 10s scrape_timeout: 10s image: prom/prometheus:v2.15.2 - name: linkerd-prometheus tracing: enabled: false --- diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index 78627e618b3a8..18008cbb8f6aa 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -30,7 +30,6 @@ func TestNewValues(t *testing.T) { InstallNamespace: true, Prometheus: Prometheus{ "enabled": true, - "name": "linkerd-prometheus", "image": "prom/prometheus:v2.15.2", "args": map[string]interface{}{ "log.level": "info", @@ -226,7 +225,6 @@ func TestNewValues(t *testing.T) { expected.Prometheus = Prometheus{ "enabled": true, - "name": "linkerd-prometheus", "image": "prom/prometheus:v2.15.2", "args": map[string]interface{}{ "log.level": "info", From 95c3ae2fcbec73cd9d45428a052068cc8d2aa34d Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 16 Jun 2020 15:33:35 +0530 Subject: [PATCH 23/42] update golden files Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_output.golden | 1 - cli/cmd/testdata/install_prometheus_overwrite.golden | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 3c8616a1b59cf..90e9e9f1caa7f 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2430,7 +2430,6 @@ data: prometheus: enabled: true image: PrometheusImage - name: linkerd-prometheus tracing: enabled: false --- diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 6cde822cf3161..1555209a24d41 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -276,6 +276,9 @@ rules: - apiGroups: ["linkerd.io"] resources: ["serviceprofiles"] verbs: ["list"] +- apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding From 4921b8675e8ffd121a2a88f124f7ea187b5459b7 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 16 Jun 2020 17:28:59 +0530 Subject: [PATCH 24/42] move defaults out of prometheus. 
Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 13 ++++++-- charts/linkerd2/values.yaml | 20 ++++++------- pkg/charts/linkerd2/values_test.go | 30 +++---------------- 3 files changed, 23 insertions(+), 40 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index c920f79cbe7ee..2668de01fb187 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -15,10 +15,13 @@ metadata: {{.Values.global.createdByAnnotation}}: {{default (printf "linkerd/helm %s" .Values.global.linkerdVersion) .Values.global.cliVersion}} data: prometheus.yml: |- - {{ if .Values.globalConfig -}} global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + {{ if .Values.globalConfig -}} {{- toYaml .Values.globalConfig | trim | nindent 6 }} - {{- end}} + {{- end}} rule_files: - /etc/prometheus/*_rules.yml @@ -224,10 +227,14 @@ spec: {{- include "linkerd.node-selector" . | nindent 6 }} containers: - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level={{lower .Values.logLevel}} {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} - image: {{.Values.image}} + image: {{ default "prom/prometheus:v2.15.2" .Values.image }} imagePullPolicy: {{.Values.global.imagePullPolicy}} livenessProbe: httpGet: diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index 73ecff6521716..dad164fff3267 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -204,17 +204,14 @@ grafana: prometheus: enabled: true - image: prom/prometheus:v2.15.2 - args: - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h - config.file: /etc/prometheus/prometheus.yml - log.level: *controller_log_level - globalConfig: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - # resources: + logLevel: *controller_log_level + # image: prom/prometheus:v2.15.3 + # args: + # storage.tsdb.retention.time: 6h + # log.level: debug + # globalConfig: + # scrape_interval: 10s + # scrape_timeout: 10s # scrapeConfigs: # - job_name: 'kubernetes-nodes' # scheme: https @@ -241,6 +238,7 @@ prometheus: # - name: recording-rules # subPath: recording_rules.yml # configMap: linkerd-prometheus-rules + # resources: tracing: enabled: false diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index 18008cbb8f6aa..bb360d9703e72 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -29,19 +29,8 @@ func TestNewValues(t *testing.T) { HeartbeatSchedule: "0 0 * * *", InstallNamespace: true, Prometheus: Prometheus{ - "enabled": true, - "image": "prom/prometheus:v2.15.2", - "args": map[string]interface{}{ - "log.level": "info", - "config.file": "/etc/prometheus/prometheus.yml", - "storage.tsdb.path": "/data", - "storage.tsdb.retention.time": "6h", - }, - "globalConfig": map[string]interface{}{ - "evaluation_interval": "10s", - "scrape_interval": "10s", - "scrape_timeout": "10s", - }, + "enabled": true, + "logLevel": "info", }, Global: &Global{ Namespace: "linkerd", @@ -224,19 +213,8 @@ func TestNewValues(t *testing.T) { } expected.Prometheus = Prometheus{ - "enabled": true, - "image": "prom/prometheus:v2.15.2", - "args": map[string]interface{}{ - "log.level": "info", - "config.file": "/etc/prometheus/prometheus.yml", - "storage.tsdb.path": 
"/data", - "storage.tsdb.retention.time": "6h", - }, - "globalConfig": map[string]interface{}{ - "evaluation_interval": "10s", - "scrape_interval": "10s", - "scrape_timeout": "10s", - }, + "enabled": true, + "logLevel": "info", "resources": map[string]interface{}{ "cpu": map[string]interface{}{ "limit": "4", From 339f032d39eab18ff4a380304c45683ae0df17bb Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 19 Jun 2020 08:45:09 +0000 Subject: [PATCH 25/42] move controllerLogLevel to global the controllerLogLevel is used by all the add-ons to set their log-level, and a relevant test has been removed as the variable can't be tested directly Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 4 +- charts/linkerd2/values.yaml | 3 +- cli/cmd/install.go | 19 +------ cli/cmd/install_test.go | 28 +--------- .../install_addon_control-plane.golden | 32 ++++------- cli/cmd/testdata/install_control-plane.golden | 32 ++++------- ...install_controlplane_tracing_output.golden | 32 ++++------- .../testdata/install_custom_registry.golden | 32 ++++------- cli/cmd/testdata/install_default.golden | 32 ++++------- .../testdata/install_grafana_existing.golden | 32 ++++------- cli/cmd/testdata/install_ha_output.golden | 32 ++++------- .../install_ha_with_overrides_output.golden | 32 ++++------- .../install_heartbeat_disabled_output.golden | 30 ++++------- cli/cmd/testdata/install_helm_output.golden | 32 ++++------- .../install_helm_output_addons.golden | 32 ++++------- .../testdata/install_helm_output_ha.golden | 32 ++++------- .../testdata/install_no_init_container.golden | 32 ++++------- cli/cmd/testdata/install_output.golden | 25 +++++---- .../install_prometheus_overwrite.golden | 34 +++++------- cli/cmd/testdata/install_proxy_ignores.golden | 32 ++++------- .../install_restricted_dashboard.golden | 32 ++++------- cli/cmd/testdata/install_tracing.golden | 32 ++++------- .../testdata/install_tracing_overwrite.golden | 32 ++++------- .../upgrade_add-on_controlplane.golden | 32 ++++------- .../testdata/upgrade_add-on_overwrite.golden | 32 ++++------- cli/cmd/testdata/upgrade_add_add-on.golden | 32 ++++------- cli/cmd/testdata/upgrade_default.golden | 32 ++++------- .../testdata/upgrade_external_issuer.golden | 32 ++++------- .../upgrade_grafana_addon_overwrite.yaml | 32 ++++------- .../testdata/upgrade_grafana_disabled.yaml | 32 ++++------- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 32 ++++------- .../upgrade_grafana_enabled_disabled.yaml | 32 ++++------- .../testdata/upgrade_grafana_overwrite.yaml | 32 ++++------- cli/cmd/testdata/upgrade_ha.golden | 32 ++++------- .../upgrade_keep_webhook_cabundle.golden | 32 ++++------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 32 ++++------- .../testdata/upgrade_overwrite_issuer.golden | 32 ++++------- ...write_trust_anchors-external-issuer.golden | 32 ++++------- .../upgrade_overwrite_trust_anchors.golden | 32 ++++------- .../upgrade_two_level_webhook_cert.golden | 32 ++++------- pkg/charts/linkerd2/values.go | 54 +++++++++---------- pkg/charts/linkerd2/values_test.go | 8 ++- 42 files changed, 437 insertions(+), 824 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 2668de01fb187..9cb84b75ceaa8 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -19,7 +19,7 @@ data: scrape_interval: 10s scrape_timeout: 10s evaluation_interval: 10s - {{ if .Values.globalConfig -}} + {{- if 
.Values.globalConfig -}} {{- toYaml .Values.globalConfig | trim | nindent 6 }} {{- end}} @@ -230,7 +230,7 @@ spec: - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - - --log.level={{lower .Values.logLevel}} + - --log.level={{lower (default .Values.global.controllerLogLevel .Values.logLevel)}} {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index dad164fff3267..2d750abbde8d6 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -6,6 +6,7 @@ global: clusterDomain: &cluster_domain cluster.local imagePullPolicy: &image_pull_policy IfNotPresent + controllerLogLevel: &controller_log_level info # control plane trace configuration controlPlaneTracing: false @@ -91,7 +92,6 @@ webhookFailurePolicy: Ignore # controller configuration controllerImage: gcr.io/linkerd-io/controller -controllerLogLevel: &controller_log_level info controllerReplicas: 1 controllerUID: 2103 @@ -204,7 +204,6 @@ grafana: prometheus: enabled: true - logLevel: *controller_log_level # image: prom/prometheus:v2.15.3 # args: # storage.tsdb.retention.time: 6h diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 22362bf526cc4..48115eade07fc 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -43,7 +43,6 @@ type ( controlPlaneVersion string controllerReplicas uint controllerLogLevel string - prometheusImage string highAvailability bool controllerUID int64 disableH2Upgrade bool @@ -179,8 +178,7 @@ func newInstallOptionsWithDefaults() (*installOptions, error) { clusterDomain: defaults.Global.ClusterDomain, controlPlaneVersion: version.Version, controllerReplicas: defaults.ControllerReplicas, - controllerLogLevel: defaults.ControllerLogLevel, - prometheusImage: defaults.Prometheus["image"].(string), + controllerLogLevel: defaults.Global.ControllerLogLevel, highAvailability: defaults.Global.HighAvailability, controllerUID: defaults.ControllerUID, disableH2Upgrade: !defaults.EnableH2Upgrade, @@ -456,11 +454,6 @@ func (options *installOptions) recordableFlagSet() *pflag.FlagSet { "Log level for the controller and web components", ) - flags.StringVar( - &options.prometheusImage, "prometheus-image", options.prometheusImage, - "Custom Prometheus image name", - ) - flags.BoolVar( &options.highAvailability, "ha", options.highAvailability, "Enable HA deployment config for the control plane (default false)", @@ -675,10 +668,6 @@ func (options *installOptions) validate() error { return fmt.Errorf("--controller-log-level must be one of: panic, fatal, error, warn, info, debug") } - if options.prometheusImage != "" && !alphaNumDashDotSlashColonUnderscore.MatchString(options.prometheusImage) { - return fmt.Errorf("%s is not a valid prometheus image", options.prometheusImage) - } - if err := options.proxyConfigOptions.validate(); err != nil { return err } @@ -752,7 +741,7 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Configs.Install = installJSON installValues.ControllerImage = fmt.Sprintf("%s/controller", options.dockerRegistry) installValues.Global.ControllerImageVersion = configs.GetGlobal().GetVersion() - installValues.ControllerLogLevel = options.controllerLogLevel + installValues.Global.ControllerLogLevel = options.controllerLogLevel installValues.ControllerReplicas = options.controllerReplicas installValues.ControllerUID = options.controllerUID 
installValues.Global.ControlPlaneTracing = options.controlPlaneTracing @@ -761,13 +750,9 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d installValues.Global.HighAvailability = options.highAvailability installValues.Global.ImagePullPolicy = options.imagePullPolicy installValues.Grafana["image"].(map[string]interface{})["name"] = fmt.Sprintf("%s/grafana", options.dockerRegistry) - if options.prometheusImage != "" { - installValues.Prometheus["image"] = options.prometheusImage - } installValues.Global.Namespace = controlPlaneNamespace installValues.Global.CNIEnabled = options.cniEnabled installValues.OmitWebhookSideEffects = options.omitWebhookSideEffects - installValues.Prometheus["args"].(map[string]interface{})["log.level"] = toPromLogLevel(strings.ToLower(options.controllerLogLevel)) installValues.HeartbeatSchedule = options.heartbeatSchedule() installValues.RestrictDashboardPrivileges = options.restrictDashboardPrivileges installValues.DisableHeartBeat = options.disableHeartbeat diff --git a/cli/cmd/install_test.go b/cli/cmd/install_test.go index 91f607b25f445..178210634793b 100644 --- a/cli/cmd/install_test.go +++ b/cli/cmd/install_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/linkerd/linkerd2/controller/gen/config" - pb "github.com/linkerd/linkerd2/controller/gen/config" charts "github.com/linkerd/linkerd2/pkg/charts/linkerd2" ) @@ -55,7 +54,6 @@ func TestRender(t *testing.T) { metaValues := &charts.Values{ ControllerImage: "ControllerImage", WebImage: "WebImage", - ControllerLogLevel: "ControllerLogLevel", ControllerUID: 2103, EnableH2Upgrade: true, WebhookFailurePolicy: "WebhookFailurePolicy", @@ -70,6 +68,7 @@ func TestRender(t *testing.T) { ImagePullPolicy: "ImagePullPolicy", CliVersion: "CliVersion", ControllerComponentLabel: "ControllerComponentLabel", + ControllerLogLevel: "ControllerLogLevel", ControllerImageVersion: "ControllerImageVersion", ControllerNamespaceLabel: "ControllerNamespaceLabel", WorkloadNamespaceLabel: "WorkloadNamespaceLabel", @@ -342,31 +341,6 @@ func TestValidate(t *testing.T) { } }) - t.Run("Ensure log level input is converted to lower case before passing to prometheus", func(t *testing.T) { - underTest, err := testInstallOptions() - if err != nil { - t.Fatalf("Unexpected error: %v\n", err) - } - - underTest.controllerLogLevel = "DEBUG" - expected := "debug" - - testValues := new(pb.All) - testValues.Global = new(pb.Global) - testValues.Proxy = new(pb.Proxy) - testValues.Install = new(pb.Install) - - actual, err := underTest.buildValuesWithoutIdentity(testValues) - - if err != nil { - t.Fatalf("Unexpected error occurred %s", err) - } - - if actual.Prometheus["args"].(map[string]interface{})["log.level"] != expected { - t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, actual.Prometheus["args"].(map[string]interface{})["log.level"]) - } - }) - t.Run("Properly validates proxy log level", func(t *testing.T) { testCases := []struct { input string diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index aff6853de4980..0028957bfea17 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -316,7 +316,7 @@ spec: - 
-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -535,7 +535,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -733,7 +733,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -803,7 +803,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -999,7 +999,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1238,7 +1238,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1459,7 +1459,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1647,17 +1647,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -1969,9 +1959,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2155,10 +2145,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index 029be3025091a..22dd99ec94024 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -316,7 +316,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -535,7 +535,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -733,7 +733,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -802,7 +802,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -998,7 +998,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1237,7 +1237,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1458,7 +1458,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1646,17 +1646,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -1962,9 +1952,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2148,10 +2138,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 6f7f9f3864ac6..868845d445f5e 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1120,7 +1120,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version 
imagePullPolicy: IfNotPresent @@ -1354,7 +1354,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1567,7 +1567,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1636,7 +1636,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/web:install-control-plane-version @@ -1847,7 +1847,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2100,7 +2100,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2335,7 +2335,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -2538,17 +2538,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2922,9 +2912,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3108,10 +3098,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 7035ca586e069..1690ec347ea9b 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - 
-addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: my.custom.registry/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: my.custom.registry/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2805,9 +2795,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2991,10 +2981,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 255e2215fbea0..e6cc0a121fded 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: 
IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2805,9 +2795,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2991,10 +2981,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 40eb7539a227a..a4380ed92c81d 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1102,7 +1102,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1321,7 +1321,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1519,7 +1519,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" 
securityContext: runAsUser: 2103 --- @@ -1588,7 +1588,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=somegrafana.xyz - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1784,7 +1784,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2023,7 +2023,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2244,7 +2244,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2429,17 +2429,7 @@ data: grafana: enabled: false prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2501,9 +2491,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2687,10 +2677,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 4a46e1ed9619f..81be27df38366 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1164,7 +1164,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1419,7 +1419,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1630,7 +1630,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" resources: limits: cpu: "1" @@ -1706,7 +1706,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - 
-enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1938,7 +1938,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2213,7 +2213,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2470,7 +2470,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2678,17 +2678,7 @@ data: limit: 1024Mi request: 50Mi prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 resources: cpu: limit: "4" @@ -3068,9 +3058,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3254,10 +3244,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index d05ab541fe816..be1e91026572f 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1164,7 +1164,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1419,7 +1419,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1630,7 +1630,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" resources: limits: cpu: "1" @@ -1706,7 +1706,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1938,7 +1938,7 
@@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2213,7 +2213,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2470,7 +2470,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2678,17 +2678,7 @@ data: limit: 1024Mi request: 50Mi prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 resources: cpu: limit: "4" @@ -3068,9 +3058,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3254,10 +3244,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 69242828f7a71..bda5827618d42 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -837,7 +837,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1061,7 +1061,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1280,7 +1280,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1502,7 +1502,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1698,7 +1698,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1937,7 +1937,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2158,7 +2158,7 @@ spec: - args: - tap - 
-controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2346,17 +2346,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2716,9 +2706,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2902,10 +2892,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index fe249d08376f4..5fcfac491de34 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -969,7 +969,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1186,7 +1186,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1398,7 +1398,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1589,7 +1589,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1660,7 +1660,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -1850,7 +1850,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2083,7 +2083,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2298,7 +2298,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2481,17 +2481,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - 
storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2850,9 +2840,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3036,10 +3026,10 @@ spec: null containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index e28e3ec24ac6a..e8e798866df55 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -969,7 +969,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1186,7 +1186,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1398,7 +1398,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1589,7 +1589,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1661,7 +1661,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -1851,7 +1851,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2084,7 +2084,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2299,7 +2299,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2482,17 +2482,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2857,9 +2847,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 
10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3043,10 +3033,10 @@ spec: null containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 7ba97e14fad34..63378aae76ec6 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -992,7 +992,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1245,7 +1245,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1493,7 +1493,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1697,7 +1697,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" resources: limits: cpu: "1" @@ -1775,7 +1775,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -2001,7 +2001,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2270,7 +2270,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2521,7 +2521,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2724,17 +2724,7 @@ data: limit: 1024Mi request: 50Mi prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 resources: cpu: limit: "4" @@ -3113,9 +3103,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3299,10 +3289,10 @@ spec: null containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: 
prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 581fdc7417388..1b88ddd249791 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1069,7 +1069,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1255,7 +1255,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1420,7 +1420,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1489,7 +1489,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1652,7 +1652,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1858,7 +1858,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2046,7 +2046,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2201,17 +2201,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2538,9 +2528,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2724,10 +2714,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 90e9e9f1caa7f..e5ad2956a9008 100644 --- 
a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - -destination-addr=linkerd-dst.Namespace.svc.cluster.local:8086 - -controller-namespace=Namespace - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1322,7 +1322,7 @@ spec: - -addr=:8086 - -controller-namespace=Namespace - -enable-h2-upgrade=true - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1519,7 +1519,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" - "-controller-namespace=Namespace" - - "-log-level=ControllerLogLevel" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1588,7 +1588,7 @@ spec: - -api-addr=linkerd-controller-api.Namespace.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.Namespace.svc.cluster.local:3000 - -controller-namespace=Namespace - - -log-level=ControllerLogLevel + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.Namespace\.svc\.cluster\.local|linkerd-web\.Namespace\.svc|\[::1\])(:\d+)?$ image: WebImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy @@ -1783,7 +1783,7 @@ spec: containers: - args: - proxy-injector - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -2021,7 +2021,7 @@ spec: containers: - args: - sp-validator - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -2241,7 +2241,7 @@ spec: - args: - tap - -controller-namespace=Namespace - - -log-level=ControllerLogLevel + - -log-level= image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -2787,7 +2787,10 @@ metadata: CreatedByAnnotation: CliVersion data: prometheus.yml: |- - + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2971,6 +2974,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=controllerloglevel image: PrometheusImage imagePullPolicy: ImagePullPolicy livenessProbe: diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 1555209a24d41..06697fc09e4e3 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent 
livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2444,19 +2444,12 @@ data: - action: labeldrop regex: prometheus_replica args: - config.file: /etc/prometheus/prometheus.yml log.format: json - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true globalConfig: evaluation_interval: 2m external_labels: cluster: cluster-1 - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 remoteWrite: - url: http://cortex-service.default:9009/api/prom/push ruleConfigMapMounts: @@ -2836,11 +2829,12 @@ metadata: data: prometheus.yml: |- global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s evaluation_interval: 2m external_labels: cluster: cluster-1 - scrape_interval: 10s - scrape_timeout: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3047,11 +3041,11 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.format=json - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --log.format=json image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index b2b9c5e2be55b..004843fdfdc2a 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ 
-1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2805,9 +2795,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2991,10 +2981,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 5d0293c670df5..1d63b8bacba07 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -813,7 +813,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1037,7 +1037,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1256,7 +1256,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version 
imagePullPolicy: IfNotPresent livenessProbe: @@ -1454,7 +1454,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1523,7 +1523,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1719,7 +1719,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1958,7 +1958,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2179,7 +2179,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2367,17 +2367,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2737,9 +2727,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2923,10 +2913,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 79d6495427a1e..b34c96ff43a36 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: 
runAsUser: 2103 --- @@ -1592,7 +1592,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1788,7 +1788,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2027,7 +2027,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2248,7 +2248,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2436,17 +2436,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2812,9 +2802,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2998,10 +2988,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 5ae6abb56115e..0c36b057738f6 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1592,7 +1592,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - 
-controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1788,7 +1788,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2027,7 +2027,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2248,7 +2248,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2436,17 +2436,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: overwrite-collector-image @@ -2812,9 +2802,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2998,10 +2988,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index f1c0cc5d5c143..e07f3a2f17a7d 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -318,7 +318,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -539,7 +539,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -739,7 +739,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -809,7 +809,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: 
gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1007,7 +1007,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1248,7 +1248,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1471,7 +1471,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1661,17 +1661,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -1985,9 +1975,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2171,10 +2161,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index e123527909191..d3d50893ecccc 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1598,7 +1598,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1796,7 +1796,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2037,7 +2037,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2260,7 +2260,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2450,17 +2450,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: overwrite-collector-image @@ -2830,9 +2820,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3016,10 +3006,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index f9ce5745f391f..1de38d8488d15 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1598,7 +1598,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1796,7 +1796,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2037,7 +2037,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2260,7 +2260,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2450,17 +2450,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2828,9 +2818,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3014,10 +3004,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 048dd9f9ccfb3..35d1cb1bc6685 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index e140dbb2201f7..9c82f5cef909f 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -867,7 +867,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1093,7 +1093,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1314,7 +1314,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1514,7 +1514,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1583,7 +1583,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1781,7 +1781,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2022,7 +2022,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2245,7 +2245,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: 
/etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2807,9 +2797,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2993,10 +2983,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 8e804f9e0ae4a..4d9fd62901161 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 
10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index f79ff8db152b3..526cbc69f08ca 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1325,7 +1325,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1525,7 +1525,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1593,7 +1593,7 @@ spec: - args: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1791,7 +1791,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2032,7 +2032,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2255,7 +2255,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2442,17 +2442,7 @@ data: grafana: enabled: false prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2514,9 +2504,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2700,10 +2690,10 @@ spec: beta.kubernetes.io/os: 
linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 8e804f9e0ae4a..4d9fd62901161 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: 
prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index f79ff8db152b3..526cbc69f08ca 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1325,7 +1325,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1525,7 +1525,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1593,7 +1593,7 @@ spec: - args: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1791,7 +1791,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2032,7 +2032,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2255,7 +2255,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2442,17 +2442,7 @@ data: grafana: enabled: false prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2514,9 +2504,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2700,10 +2690,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 70c4c99024e75..974fca0b75850 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ 
b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana-overwrite.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: linkerd-image-overwrite name: linkerd-grafana-overwrite prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 3de4eea553519..90e23e7c44fc6 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1166,7 
+1166,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1423,7 +1423,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1636,7 +1636,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" resources: limits: cpu: "1" @@ -1712,7 +1712,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1946,7 +1946,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2223,7 +2223,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2482,7 +2482,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2692,17 +2692,7 @@ data: limit: 1024Mi request: 50Mi prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 resources: cpu: limit: "4" @@ -3084,9 +3074,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3270,10 +3260,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 83e90c5f295ba..0323c9ca09725 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 8e804f9e0ae4a..4d9fd62901161 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: 
gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index ed60a1757b1d2..89ac5e43eefec 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - 
"-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2805,9 +2795,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2991,10 +2981,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index bb686e56d6ad4..a06df3cd25399 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -867,7 +867,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1091,7 +1091,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1310,7 +1310,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1508,7 +1508,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1577,7 +1577,7 @@ spec: 
- -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1773,7 +1773,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2012,7 +2012,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2233,7 +2233,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2421,17 +2421,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2791,9 +2781,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2977,10 +2967,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index ed60a1757b1d2..89ac5e43eefec 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - 
-enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2435,17 +2435,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2805,9 +2795,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -2991,10 +2981,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 05d44d4c6760a..e70117151d553 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: 
IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2449,17 +2449,7 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: - args: - config.file: /etc/prometheus/prometheus.yml - log.level: info - storage.tsdb.path: /data - storage.tsdb.retention.time: 6h enabled: true - globalConfig: - evaluation_interval: 10s - scrape_interval: 10s - scrape_timeout: 10s - image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2821,9 +2811,9 @@ metadata: data: prometheus.yml: |- global: - evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s + evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3007,10 +2997,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/pkg/charts/linkerd2/values.go b/pkg/charts/linkerd2/values.go index 9c86954d190cf..72ffd90566ed0 100644 --- a/pkg/charts/linkerd2/values.go +++ b/pkg/charts/linkerd2/values.go @@ -19,35 +19,30 @@ const ( type ( // Values contains the top-level elements in the Helm charts Values struct { - Stage string `json:"stage"` - ControllerImage string `json:"controllerImage"` - ControllerImageVersion string `json:"controllerImageVersion"` - WebImage string `json:"webImage"` - ControllerReplicas uint `json:"controllerReplicas"` - ControllerLogLevel string `json:"controllerLogLevel"` - PrometheusLogLevel string `json:"prometheusLogLevel"` - PrometheusExtraArgs map[string]string `json:"prometheusExtraArgs"` - PrometheusAlertmanagers []interface{} `json:"prometheusAlertmanagers"` - PrometheusRuleConfigMapMounts []PrometheusRuleConfigMapMount `json:"prometheusRuleConfigMapMounts"` - ControllerUID int64 `json:"controllerUID"` - EnableH2Upgrade bool `json:"enableH2Upgrade"` - EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` - WebhookFailurePolicy string `json:"webhookFailurePolicy"` - OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` - RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` - DisableHeartBeat bool `json:"disableHeartBeat"` - HeartbeatSchedule string `json:"heartbeatSchedule"` - InstallNamespace bool `json:"installNamespace"` - Configs ConfigJSONs `json:"configs"` - Global *Global `json:"global"` - Identity *Identity `json:"identity"` - Dashboard *Dashboard `json:"dashboard"` - DebugContainer *DebugContainer `json:"debugContainer"` - ProxyInjector *ProxyInjector `json:"proxyInjector"` - ProfileValidator *ProfileValidator `json:"profileValidator"` - Tap *Tap `json:"tap"` - NodeSelector map[string]string `json:"nodeSelector"` - SMIMetrics *SMIMetrics `json:"smiMetrics"` + Stage string `json:"stage"` + ControllerImage string `json:"controllerImage"` + ControllerImageVersion 
string `json:"controllerImageVersion"` + WebImage string `json:"webImage"` + ControllerReplicas uint `json:"controllerReplicas"` + ControllerUID int64 `json:"controllerUID"` + EnableH2Upgrade bool `json:"enableH2Upgrade"` + EnablePodAntiAffinity bool `json:"enablePodAntiAffinity"` + WebhookFailurePolicy string `json:"webhookFailurePolicy"` + OmitWebhookSideEffects bool `json:"omitWebhookSideEffects"` + RestrictDashboardPrivileges bool `json:"restrictDashboardPrivileges"` + DisableHeartBeat bool `json:"disableHeartBeat"` + HeartbeatSchedule string `json:"heartbeatSchedule"` + InstallNamespace bool `json:"installNamespace"` + Configs ConfigJSONs `json:"configs"` + Global *Global `json:"global"` + Identity *Identity `json:"identity"` + Dashboard *Dashboard `json:"dashboard"` + DebugContainer *DebugContainer `json:"debugContainer"` + ProxyInjector *ProxyInjector `json:"proxyInjector"` + ProfileValidator *ProfileValidator `json:"profileValidator"` + Tap *Tap `json:"tap"` + NodeSelector map[string]string `json:"nodeSelector"` + SMIMetrics *SMIMetrics `json:"smiMetrics"` DestinationResources *Resources `json:"destinationResources"` HeartbeatResources *Resources `json:"heartbeatResources"` @@ -73,6 +68,7 @@ type ( CliVersion string `json:"cliVersion"` ControllerComponentLabel string `json:"controllerComponentLabel"` ControllerImageVersion string `json:"controllerImageVersion"` + ControllerLogLevel string `json:"controllerLogLevel"` ControllerNamespaceLabel string `json:"controllerNamespaceLabel"` WorkloadNamespaceLabel string `json:"workloadNamespaceLabel"` CreatedByAnnotation string `json:"createdByAnnotation"` diff --git a/pkg/charts/linkerd2/values_test.go b/pkg/charts/linkerd2/values_test.go index bb360d9703e72..0cfb1f666b3fa 100644 --- a/pkg/charts/linkerd2/values_test.go +++ b/pkg/charts/linkerd2/values_test.go @@ -18,7 +18,6 @@ func TestNewValues(t *testing.T) { ControllerImage: "gcr.io/linkerd-io/controller", WebImage: "gcr.io/linkerd-io/web", ControllerReplicas: 1, - ControllerLogLevel: "info", ControllerUID: 2103, EnableH2Upgrade: true, EnablePodAntiAffinity: false, @@ -29,8 +28,7 @@ func TestNewValues(t *testing.T) { HeartbeatSchedule: "0 0 * * *", InstallNamespace: true, Prometheus: Prometheus{ - "enabled": true, - "logLevel": "info", + "enabled": true, }, Global: &Global{ Namespace: "linkerd", @@ -38,6 +36,7 @@ func TestNewValues(t *testing.T) { ImagePullPolicy: "IfNotPresent", CliVersion: "linkerd/cli dev-undefined", ControllerComponentLabel: "linkerd.io/control-plane-component", + ControllerLogLevel: "info", ControllerImageVersion: testVersion, ControllerNamespaceLabel: "linkerd.io/control-plane-ns", WorkloadNamespaceLabel: "linkerd.io/workload-ns", @@ -213,8 +212,7 @@ func TestNewValues(t *testing.T) { } expected.Prometheus = Prometheus{ - "enabled": true, - "logLevel": "info", + "enabled": true, "resources": map[string]interface{}{ "cpu": map[string]interface{}{ "limit": "4", From 3be8cf39411b5ef200bc696107e3e1e3ccaa8738 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 19 Jun 2020 10:16:28 +0000 Subject: [PATCH 26/42] update golden files Signed-off-by: Tarun Pothulapati --- .../install_addon_control-plane.golden | 15 +- cli/cmd/testdata/install_control-plane.golden | 18 +- ...stall_default_override_dst_get_nets.golden | 880 +++++++++--------- .../install_helm_output_addons.golden | 15 +- cli/cmd/testdata/install_output.golden | 204 +--- .../install_prometheus_overwrite.golden | 6 +- cli/cmd/testdata/install_tracing.golden | 15 +- 
.../testdata/install_tracing_overwrite.golden | 15 +- .../upgrade_add-on_controlplane.golden | 15 +- .../testdata/upgrade_add-on_overwrite.golden | 15 +- cli/cmd/testdata/upgrade_add_add-on.golden | 15 +- 11 files changed, 506 insertions(+), 707 deletions(-) diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 0af1da67b3650..6a96eacddef37 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1778,6 +1778,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2623,14 +2627,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index 259d0de650294..6fb4fc6755282 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1771,6 +1771,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2137,14 +2141,12 @@ spec: nodeSelector: beta.kubernetes.io/os: linux containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index d6522b72bdef2..17b7002eb4862 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -380,47 +380,6 @@ spec: JSONPath: .spec.service --- ### -### Prometheus RBAC -### ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -rules: -- apiGroups: [""] - resources: ["nodes", "nodes/proxy", "pods"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: linkerd-linkerd-prometheus - labels: - 
linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: linkerd-linkerd-prometheus -subjects: -- kind: ServiceAccount - name: linkerd-prometheus - namespace: linkerd ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd ---- -### ### Proxy Injector RBAC ### --- @@ -922,7 +881,7 @@ spec: containers: - args: - identity - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1146,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1365,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1563,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=info" + - "-log-level=" securityContext: runAsUser: 2103 --- @@ -1632,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level=info + - -log-level= - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1790,192 +1749,27 @@ spec: name: linkerd-identity-end-entity --- ### -### Prometheus +### Proxy Injector ### --- -kind: ConfigMap -apiVersion: v1 -metadata: - name: linkerd-prometheus-config - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -data: - prometheus.yml: |- - global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s - - rule_files: - - /etc/prometheus/*_rules.yml - - /etc/prometheus/*_rules.yaml - - scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - - job_name: 'grafana' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - action: keep - regex: ^grafana$ - - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - 
source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - - job_name: 'linkerd-controller' - kubernetes_sd_configs: - - role: pod - namespaces: - names: ['linkerd'] - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: (.*);admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - - job_name: 'linkerd-proxy' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_container_name - - __meta_kubernetes_pod_container_port_name - - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns - action: keep - regex: ^linkerd-proxy;linkerd-admin;linkerd$ - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - # special case k8s' "job" label, to not interfere with prometheus' "job" - # label - # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => - # k8s_job=foo - - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] - action: replace - target_label: k8s_job - # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job - # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => - # deployment=foo - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # drop all labels that we just made copies of in the previous labelmap - - action: labeldrop - regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) - # __meta_kubernetes_pod_label_linkerd_io_foo=bar => - # foo=bar - - action: labelmap - regex: __meta_kubernetes_pod_label_linkerd_io_(.+) - # Copy all pod labels to tmp labels - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - replacement: __tmp_pod_label_$1 - # Take `linkerd_io_` prefixed labels and copy them without the prefix - - action: labelmap - regex: __tmp_pod_label_linkerd_io_(.+) - replacement: __tmp_pod_label_$1 - # Drop the `linkerd_io_` originals - - action: labeldrop - regex: __tmp_pod_label_linkerd_io_(.+) - # Copy tmp labels into real labels - - action: labelmap - regex: __tmp_pod_label_(.+) ---- -kind: Service -apiVersion: v1 -metadata: - name: linkerd-prometheus - namespace: linkerd - labels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined -spec: - type: ClusterIP - selector: - linkerd.io/control-plane-component: prometheus - ports: - - name: admin-http - port: 9090 - targetPort: 9090 ---- apiVersion: apps/v1 kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined labels: - app.kubernetes.io/name: prometheus + app.kubernetes.io/name: proxy-injector app.kubernetes.io/part-of: Linkerd app.kubernetes.io/version: 
install-control-plane-version - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd - name: linkerd-prometheus + name: linkerd-proxy-injector namespace: linkerd spec: replicas: 1 selector: matchLabels: - linkerd.io/control-plane-component: prometheus - linkerd.io/control-plane-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/control-plane-component: proxy-injector template: metadata: annotations: @@ -1983,242 +1777,37 @@ spec: linkerd.io/identity-mode: default linkerd.io/proxy-version: install-proxy-version labels: - linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-component: proxy-injector linkerd.io/control-plane-ns: linkerd linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-prometheus + linkerd.io/proxy-deployment: linkerd-proxy-injector spec: nodeSelector: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 + - proxy-injector + - -log-level= + image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus + path: /ping + port: 9995 + initialDelaySeconds: 10 + name: proxy-injector ports: - - containerPort: 9090 + - containerPort: 8443 + name: proxy-injector + - containerPort: 9995 name: admin-http readinessProbe: + failureThreshold: 7 httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 + path: /ready + port: 9995 securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.0.0.0/8" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: 
- annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 + runAsUser: 2103 volumeMounts: - mountPath: /var/run/linkerd/config name: config @@ -2437,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2658,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level=info + - -log-level= image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2845,6 +2434,8 @@ data: image: name: gcr.io/linkerd-io/grafana name: linkerd-grafana + prometheus: + enabled: true tracing: enabled: false --- @@ -3149,3 +2740,414 @@ spec: - emptyDir: medium: Memory name: linkerd-identity-end-entity +--- +### +### Prometheus RBAC +### +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +rules: +- apiGroups: [""] + resources: ["nodes", "nodes/proxy", "pods"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: linkerd-linkerd-prometheus + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: linkerd-linkerd-prometheus +subjects: +- kind: ServiceAccount + name: linkerd-prometheus + namespace: linkerd +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd +--- +### +### Prometheus +### +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: linkerd-prometheus-config + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +data: + prometheus.yml: |- + global: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s + + rule_files: + - /etc/prometheus/*_rules.yml + - 
/etc/prometheus/*_rules.yaml + + scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'grafana' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + action: keep + regex: ^grafana$ + + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + + - job_name: 'linkerd-controller' + kubernetes_sd_configs: + - role: pod + namespaces: + names: ['linkerd'] + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: (.*);admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component + + - job_name: 'linkerd-proxy' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_container_name + - __meta_kubernetes_pod_container_port_name + - __meta_kubernetes_pod_label_linkerd_io_control_plane_ns + action: keep + regex: ^linkerd-proxy;linkerd-admin;linkerd$ + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + # special case k8s' "job" label, to not interfere with prometheus' "job" + # label + # __meta_kubernetes_pod_label_linkerd_io_proxy_job=foo => + # k8s_job=foo + - source_labels: [__meta_kubernetes_pod_label_linkerd_io_proxy_job] + action: replace + target_label: k8s_job + # drop __meta_kubernetes_pod_label_linkerd_io_proxy_job + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_job + # __meta_kubernetes_pod_label_linkerd_io_proxy_deployment=foo => + # deployment=foo + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # drop all labels that we just made copies of in the previous labelmap + - action: labeldrop + regex: __meta_kubernetes_pod_label_linkerd_io_proxy_(.+) + # __meta_kubernetes_pod_label_linkerd_io_foo=bar => + # foo=bar + - action: labelmap + regex: __meta_kubernetes_pod_label_linkerd_io_(.+) + # Copy all pod labels to tmp labels + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + replacement: __tmp_pod_label_$1 + # Take `linkerd_io_` 
prefixed labels and copy them without the prefix + - action: labelmap + regex: __tmp_pod_label_linkerd_io_(.+) + replacement: __tmp_pod_label_$1 + # Drop the `linkerd_io_` originals + - action: labeldrop + regex: __tmp_pod_label_linkerd_io_(.+) + # Copy tmp labels into real labels + - action: labelmap + regex: __tmp_pod_label_(.+) +--- +kind: Service +apiVersion: v1 +metadata: + name: linkerd-prometheus + namespace: linkerd + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined +spec: + type: ClusterIP + selector: + linkerd.io/control-plane-component: prometheus + ports: + - name: admin-http + port: 9090 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + labels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: Linkerd + app.kubernetes.io/version: install-control-plane-version + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + name: linkerd-prometheus + namespace: linkerd +spec: + replicas: 1 + selector: + matchLabels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + template: + metadata: + annotations: + linkerd.io/created-by: linkerd/cli dev-undefined + linkerd.io/identity-mode: default + linkerd.io/proxy-version: install-proxy-version + labels: + linkerd.io/control-plane-component: prometheus + linkerd.io/control-plane-ns: linkerd + linkerd.io/workload-ns: linkerd + linkerd.io/proxy-deployment: linkerd-prometheus + spec: + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - args: + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + image: prom/prometheus:v2.15.2 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /-/healthy + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + name: prometheus + ports: + - containerPort: 9090 + name: admin-http + readinessProbe: + httpGet: + path: /-/ready + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + runAsUser: 65534 + volumeMounts: + - mountPath: /data + name: data + - mountPath: /etc/prometheus/prometheus.yml + name: prometheus-config + subPath: prometheus.yml + readOnly: true + - env: + - name: LINKERD2_PROXY_LOG + value: warn,linkerd=info + - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR + value: linkerd-dst.linkerd.svc.cluster.local:8086 + - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS + value: "10.0.0.0/8,172.0.0.0/8" + - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR + value: 0.0.0.0:4190 + - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR + value: 0.0.0.0:4191 + - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR + value: 127.0.0.1:4140 + - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR + value: 0.0.0.0:4143 + - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES + value: svc.cluster.local. + - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES + value: svc.cluster.local. 
+ - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE + value: 10000ms + - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE + value: 10000ms + - name: _pod_ns + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LINKERD2_PROXY_DESTINATION_CONTEXT + value: ns:$(_pod_ns) + - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY + value: "10000" + - name: LINKERD2_PROXY_IDENTITY_DIR + value: /var/run/linkerd/identity/end-entity + - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS + value: | + -----BEGIN CERTIFICATE----- + MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy + LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE + AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 + xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 + 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF + BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE + AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv + OLO4Zsk1XrGZHGsmyiEyvYF9lpY= + -----END CERTIFICATE----- + - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/token + - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR + value: linkerd-identity.linkerd.svc.cluster.local:8080 + - name: _pod_sa + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: _l5d_ns + value: linkerd + - name: _l5d_trustdomain + value: cluster.local + - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME + value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_IDENTITY_SVC_NAME + value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_DESTINATION_SVC_NAME + value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + - name: LINKERD2_PROXY_TAP_SVC_NAME + value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) + image: gcr.io/linkerd-io/proxy:install-proxy-version + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /live + port: 4191 + initialDelaySeconds: 10 + name: linkerd-proxy + ports: + - containerPort: 4143 + name: linkerd-proxy + - containerPort: 4191 + name: linkerd-admin + readinessProbe: + httpGet: + path: /ready + port: 4191 + initialDelaySeconds: 2 + resources: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 2102 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/run/linkerd/identity/end-entity + name: linkerd-identity-end-entity + initContainers: + - args: + - --incoming-proxy-port + - "4143" + - --outgoing-proxy-port + - "4140" + - --proxy-uid + - "2102" + - --inbound-ports-to-ignore + - 4190,4191 + - --outbound-ports-to-ignore + - "443" + image: gcr.io/linkerd-io/proxy-init:v1.3.3 + imagePullPolicy: IfNotPresent + name: linkerd-init + resources: + limits: + cpu: "100m" + memory: "50Mi" + requests: + cpu: "10m" + memory: "10Mi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + serviceAccountName: linkerd-prometheus + volumes: + - emptyDir: {} + name: data + - configMap: + name: linkerd-prometheus-config + name: prometheus-config + - emptyDir: + medium: Memory + name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_addons.golden 
b/cli/cmd/testdata/install_helm_output_addons.golden index 4126692d284e6..9661f6c097e0e 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2631,6 +2631,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -3524,14 +3528,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:linkerd-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 77e0850fad48c..e8e82421e187a 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -1793,208 +1793,6 @@ spec: initialDelaySeconds: 10 name: proxy-injector ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.Namespace.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "DestinationGetNetworks" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.Namespace.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: Namespace - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: ProxyImageName:ProxyVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: ProxyInitImageName:ProxyInitVersion - imagePullPolicy: ImagePullPolicy - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - emptyDir: {} - name: data - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - 
CreatedByAnnotation: CliVersion - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector - namespace: Namespace -spec: - replicas: 1 - selector: - matchLabels: - ControllerComponentLabel: proxy-injector - template: - metadata: - annotations: - CreatedByAnnotation: CliVersion - linkerd.io/identity-mode: default - linkerd.io/proxy-version: ProxyVersion - labels: - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - containerPort: 8443 name: proxy-injector - containerPort: 9995 @@ -3217,7 +3015,7 @@ spec: - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.Namespace.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + value: "DestinationGetNetworks" - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR value: 0.0.0.0:4190 - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 06697fc09e4e3..84b73be7f86d4 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -802,7 +802,7 @@ data: global: | {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN CERTIFICATE-----\nMIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy\nLmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE\nAxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0\nxtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364\n6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF\nBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE\nAiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv\nOLO4Zsk1XrGZHGsmyiEyvYF9lpY=\n-----END CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s","scheme":"linkerd.io/tls"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} proxy: | - {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"} + 
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version","destinationGetNetworks":"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"} install: | {"cliVersion":"dev-undefined","flags":[]} --- @@ -2607,6 +2607,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 5a9e32ad05b00..e1b37e8a488b2 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2580,6 +2580,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -3492,14 +3496,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index ef9c34105feec..68c05c850b94b 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2580,6 +2580,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -3490,14 +3494,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:install-control-plane-version + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git 
a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 232bdd1b54287..b0086b1ce62ff 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1792,6 +1792,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2643,14 +2647,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 609d386d141f4..34f84123ac3e7 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2596,6 +2596,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -3512,14 +3516,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 3b193868186ce..ae93d11de5dc0 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2594,6 +2594,10 @@ spec: - env: - name: GF_PATHS_DATA value: /data + # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments + # see https://github.com/grafana/grafana/issues/20096 + - name: GODEBUG + value: netdns=go image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -3512,14 +3516,9 @@ spec: linkerd.io/proxy-deployment: linkerd-jaeger spec: containers: - - env: - - name: GF_PATHS_DATA - value: /data - # Force using the go-based DNS resolver instead of the OS' to avoid failures in some environments - # see https://github.com/grafana/grafana/issues/20096 - - name: GODEBUG - value: netdns=go - image: gcr.io/linkerd-io/grafana:UPGRADE-CONTROL-PLANE-VERSION + - args: + - --query.base-path=/jaeger + image: jaegertracing/all-in-one:1.17.1 imagePullPolicy: IfNotPresent name: jaeger ports: From 2fd46b91c7251185e04c662073d1e0c328bcddfc Mon Sep 17 00:00:00 2001 From: Tarun 
Pothulapati Date: Fri, 19 Jun 2020 10:34:09 +0000 Subject: [PATCH 27/42] remove unnecessary functions related to promLog Signed-off-by: Tarun Pothulapati --- cli/cmd/install.go | 9 --------- cli/cmd/root.go | 7 +++---- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/cli/cmd/install.go b/cli/cmd/install.go index db2af1ca318d8..8a94310c9e08e 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -803,15 +803,6 @@ func (options *installOptions) buildValuesWithoutIdentity(configs *pb.All) (*l5d return installValues, nil } -func toPromLogLevel(level string) string { - switch level { - case "panic", "fatal": - return "error" - default: - return level - } -} - func render(w io.Writer, values *l5dcharts.Values) error { // Render raw values and create chart config rawValues, err := yaml.Marshal(values) diff --git a/cli/cmd/root.go b/cli/cmd/root.go index f1528ea21028d..6276c0782eeee 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -55,10 +55,9 @@ var ( // These regexs are not as strict as they could be, but are a quick and dirty // sanity check against illegal characters. - alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) - alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`) - alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`) - alphaNumDashDotSlashColonUnderscore = regexp.MustCompile(`^[\./a-zA-Z0-9-:_]+$`) + alphaNumDash = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) + alphaNumDashDot = regexp.MustCompile(`^[\.a-zA-Z0-9-]+$`) + alphaNumDashDotSlashColon = regexp.MustCompile(`^[\./a-zA-Z0-9-:]+$`) // Full Rust log level syntax at // https://docs.rs/env_logger/0.6.0/env_logger/#enabling-logging From 1a0a212b4496f0b523e5f8ad78c329754e0818da Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Fri, 19 Jun 2020 11:59:43 +0000 Subject: [PATCH 28/42] use global.controllerLogLevel Signed-off-by: Tarun Pothulapati --- charts/linkerd2/templates/controller.yaml | 2 +- charts/linkerd2/templates/destination.yaml | 2 +- charts/linkerd2/templates/heartbeat.yaml | 2 +- charts/linkerd2/templates/identity.yaml | 2 +- charts/linkerd2/templates/proxy-injector.yaml | 2 +- charts/linkerd2/templates/sp-validator.yaml | 2 +- charts/linkerd2/templates/tap.yaml | 2 +- charts/linkerd2/templates/web.yaml | 2 +- test/install_test.go | 4 ++-- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/charts/linkerd2/templates/controller.yaml b/charts/linkerd2/templates/controller.yaml index 8454b01c0578e..55654b96d9d1e 100644 --- a/charts/linkerd2/templates/controller.yaml +++ b/charts/linkerd2/templates/controller.yaml @@ -72,7 +72,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090 - -destination-addr=linkerd-dst.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:8086 - -controller-namespace={{.Values.global.namespace}} - - -log-level={{.Values.controllerLogLevel}} + - -log-level={{.Values.global.controllerLogLevel}} {{- include "partials.linkerd.trace" . 
| nindent 8 -}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
diff --git a/charts/linkerd2/templates/destination.yaml b/charts/linkerd2/templates/destination.yaml
index 82e97df5bbf46..bb94983b915da 100644
--- a/charts/linkerd2/templates/destination.yaml
+++ b/charts/linkerd2/templates/destination.yaml
@@ -72,7 +72,7 @@ spec:
         - -addr=:8086
         - -controller-namespace={{.Values.global.namespace}}
         - -enable-h2-upgrade={{.Values.enableH2Upgrade}}
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         {{- include "partials.linkerd.trace" . | nindent 8 -}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
diff --git a/charts/linkerd2/templates/heartbeat.yaml b/charts/linkerd2/templates/heartbeat.yaml
index 66d8906ef479b..3e513adfbc1fd 100644
--- a/charts/linkerd2/templates/heartbeat.yaml
+++ b/charts/linkerd2/templates/heartbeat.yaml
@@ -41,7 +41,7 @@ spec:
             - "heartbeat"
             - "-prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090"
             - "-controller-namespace={{.Values.global.namespace}}"
-            - "-log-level={{.Values.controllerLogLevel}}"
+            - "-log-level={{.Values.global.controllerLogLevel}}"
             {{- if .Values.heartbeatResources -}}
             {{- include "partials.resources" .Values.heartbeatResources | nindent 12 }}
             {{- end }}
diff --git a/charts/linkerd2/templates/identity.yaml b/charts/linkerd2/templates/identity.yaml
index 169c020d80692..8a2d16285c4bc 100644
--- a/charts/linkerd2/templates/identity.yaml
+++ b/charts/linkerd2/templates/identity.yaml
@@ -89,7 +89,7 @@ spec:
       containers:
       - args:
         - identity
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         {{- include "partials.linkerd.trace" . | nindent 8 -}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
diff --git a/charts/linkerd2/templates/proxy-injector.yaml b/charts/linkerd2/templates/proxy-injector.yaml
index 0f5e9e0f9bb53..196812b611ccb 100644
--- a/charts/linkerd2/templates/proxy-injector.yaml
+++ b/charts/linkerd2/templates/proxy-injector.yaml
@@ -51,7 +51,7 @@ spec:
       containers:
       - args:
        - proxy-injector
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
         livenessProbe:
diff --git a/charts/linkerd2/templates/sp-validator.yaml b/charts/linkerd2/templates/sp-validator.yaml
index b19e6c243b46f..2e364dcbd4385 100644
--- a/charts/linkerd2/templates/sp-validator.yaml
+++ b/charts/linkerd2/templates/sp-validator.yaml
@@ -70,7 +70,7 @@ spec:
       containers:
       - args:
        - sp-validator
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
         livenessProbe:
diff --git a/charts/linkerd2/templates/tap.yaml b/charts/linkerd2/templates/tap.yaml
index fc5c1d957466d..51a1282104e68 100644
--- a/charts/linkerd2/templates/tap.yaml
+++ b/charts/linkerd2/templates/tap.yaml
@@ -76,7 +76,7 @@ spec:
       - args:
         - tap
         - -controller-namespace={{.Values.global.namespace}}
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         {{- include "partials.linkerd.trace" . | nindent 8 -}}
         image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}}
         imagePullPolicy: {{.Values.global.imagePullPolicy}}
diff --git a/charts/linkerd2/templates/web.yaml b/charts/linkerd2/templates/web.yaml
index 8221695c3766b..bb64b18d8d2d5 100644
--- a/charts/linkerd2/templates/web.yaml
+++ b/charts/linkerd2/templates/web.yaml
@@ -72,7 +72,7 @@ spec:
         - -jaeger-addr={{.Values.tracing.jaeger.name}}.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:16686
         {{- end}}
         - -controller-namespace={{.Values.global.namespace}}
-        - -log-level={{.Values.controllerLogLevel}}
+        - -log-level={{.Values.global.controllerLogLevel}}
         {{- if .Values.enforcedHostRegexp }}
         - -enforced-host={{.Values.enforcedHostRegexp}}
         {{- else -}}
diff --git a/test/install_test.go b/test/install_test.go
index 55e58ba089966..616362cfedeca 100644
--- a/test/install_test.go
+++ b/test/install_test.go
@@ -303,7 +303,7 @@ func TestInstallOrUpgradeCli(t *testing.T) {
 // These need to be updated (if there are changes) once a new stable is released
 func helmOverridesStable(root *tls.CA) []string {
     return []string{
-        "--set", "controllerLogLevel=debug",
+        "--set", "global.controllerLogLevel=debug",
         "--set", "global.linkerdVersion=" + TestHelper.UpgradeHelmFromVersion(),
         "--set", "global.proxy.image.version=" + TestHelper.UpgradeHelmFromVersion(),
         "--set", "global.identityTrustDomain=cluster.local",
@@ -317,7 +317,7 @@
 // These need to correspond to the flags in the current edge
 func helmOverridesEdge(root *tls.CA) []string {
     return []string{
-        "--set", "controllerLogLevel=debug",
+        "--set", "global.controllerLogLevel=debug",
         "--set", "global.linkerdVersion=" + TestHelper.GetVersion(),
         "--set", "global.proxy.image.version=" + TestHelper.GetVersion(),
         "--set", "global.identityTrustDomain=cluster.local",

From 0484125e32ea91d33fe47c88bea9958c6693a979 Mon Sep 17 00:00:00 2001
From: Tarun Pothulapati
Date: Fri, 19 Jun 2020 12:22:37 +0000
Subject: [PATCH 29/42] update golden files

Signed-off-by: Tarun Pothulapati
---
 .../testdata/install_addon_control-plane.golden   | 16 ++++++++--------
 cli/cmd/testdata/install_control-plane.golden     | 16 ++++++++--------
 .../install_controlplane_tracing_output.golden    | 16 ++++++++--------
 cli/cmd/testdata/install_custom_registry.golden   | 16 ++++++++--------
 cli/cmd/testdata/install_default.golden           | 16 ++++++++--------
 .../install_default_override_dst_get_nets.golden  | 16 ++++++++--------
 cli/cmd/testdata/install_grafana_existing.golden  | 16 ++++++++--------
 cli/cmd/testdata/install_ha_output.golden         | 16 ++++++++--------
 .../install_ha_with_overrides_output.golden       | 16 ++++++++--------
 .../install_heartbeat_disabled_output.golden      | 14 +++++++-------
 cli/cmd/testdata/install_helm_output.golden       | 16 ++++++++--------
 .../testdata/install_helm_output_addons.golden    | 16 ++++++++--------
 cli/cmd/testdata/install_helm_output_ha.golden    | 16 ++++++++--------
 .../testdata/install_no_init_container.golden     | 16 ++++++++--------
 cli/cmd/testdata/install_output.golden            | 16 ++++++++--------
 .../testdata/install_prometheus_overwrite.golden  | 16 ++++++++--------
 cli/cmd/testdata/install_proxy_ignores.golden     | 16 ++++++++--------
 .../testdata/install_restricted_dashboard.golden  | 16 ++++++++--------
 cli/cmd/testdata/install_tracing.golden           | 16 ++++++++--------
 .../testdata/install_tracing_overwrite.golden     | 16 ++++++++--------
 .../testdata/upgrade_add-on_controlplane.golden   | 16 ++++++++--------
cli/cmd/testdata/upgrade_add-on_overwrite.golden | 16 ++++++++-------- cli/cmd/testdata/upgrade_add_add-on.golden | 16 ++++++++-------- cli/cmd/testdata/upgrade_default.golden | 16 ++++++++-------- cli/cmd/testdata/upgrade_external_issuer.golden | 16 ++++++++-------- .../upgrade_grafana_addon_overwrite.yaml | 16 ++++++++-------- cli/cmd/testdata/upgrade_grafana_disabled.yaml | 16 ++++++++-------- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 16 ++++++++-------- .../upgrade_grafana_enabled_disabled.yaml | 16 ++++++++-------- cli/cmd/testdata/upgrade_grafana_overwrite.yaml | 16 ++++++++-------- cli/cmd/testdata/upgrade_ha.golden | 16 ++++++++-------- .../upgrade_keep_webhook_cabundle.golden | 16 ++++++++-------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 16 ++++++++-------- cli/cmd/testdata/upgrade_overwrite_issuer.golden | 16 ++++++++-------- ...verwrite_trust_anchors-external-issuer.golden | 16 ++++++++-------- .../upgrade_overwrite_trust_anchors.golden | 16 ++++++++-------- .../upgrade_two_level_webhook_cert.golden | 16 ++++++++-------- 37 files changed, 295 insertions(+), 295 deletions(-) diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 6a96eacddef37..f81a44dc3000c 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -316,7 +316,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -535,7 +535,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -733,7 +733,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -803,7 +803,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -999,7 +999,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1238,7 +1238,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1459,7 +1459,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_control-plane.golden 
b/cli/cmd/testdata/install_control-plane.golden index 6fb4fc6755282..67414c32846f9 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -316,7 +316,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -535,7 +535,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -733,7 +733,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -802,7 +802,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -998,7 +998,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1237,7 +1237,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1458,7 +1458,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 41519eba6a8ec..a878d84319b70 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1120,7 +1120,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1354,7 +1354,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: 
IfNotPresent @@ -1567,7 +1567,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1636,7 +1636,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/web:install-control-plane-version @@ -1847,7 +1847,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2100,7 +2100,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2335,7 +2335,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index c3b7c997a79cf..a0ceb06f48ab0 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: my.custom.registry/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: 
my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index b138b8a092b56..9ea992be4eddb 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 17b7002eb4862..bb0486ee1be61 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: 
@@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 29273f0297815..8f6b38532aa48 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1102,7 +1102,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1321,7 +1321,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1519,7 +1519,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1588,7 +1588,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=somegrafana.xyz - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1784,7 +1784,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2023,7 +2023,7 @@ spec: containers: - args: - sp-validator - - -log-level= + 
- -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2244,7 +2244,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 47572e8a0cbb4..7fabff66e7c77 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1164,7 +1164,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1419,7 +1419,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1630,7 +1630,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" resources: limits: cpu: "1" @@ -1706,7 +1706,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1938,7 +1938,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2213,7 +2213,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2470,7 +2470,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 71dedd668d4b4..80e6ae5be7bc6 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1164,7 +1164,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1419,7 +1419,7 
@@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1630,7 +1630,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" resources: limits: cpu: "1" @@ -1706,7 +1706,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1938,7 +1938,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2213,7 +2213,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2470,7 +2470,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 6ffd74d819d6c..da2286161af38 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -837,7 +837,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1061,7 +1061,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1280,7 +1280,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1502,7 +1502,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1698,7 +1698,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1937,7 +1937,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2158,7 +2158,7 @@ spec: - args: - tap - 
-controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 9db1ff598a0cd..89ddfcfb58fae 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -970,7 +970,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1187,7 +1187,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1399,7 +1399,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1590,7 +1590,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1661,7 +1661,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -1851,7 +1851,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2084,7 +2084,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2299,7 +2299,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 9661f6c097e0e..209dc71f1b945 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -970,7 +970,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1187,7 +1187,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1399,7 +1399,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1590,7 +1590,7 @@ spec: - "heartbeat" - 
"-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1662,7 +1662,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -1852,7 +1852,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2085,7 +2085,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2300,7 +2300,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index a2af30fd4402d..71b1b27b19a94 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -993,7 +993,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1246,7 +1246,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1494,7 +1494,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1698,7 +1698,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" resources: limits: cpu: "1" @@ -1776,7 +1776,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:linkerd-version imagePullPolicy: IfNotPresent @@ -2002,7 +2002,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2271,7 +2271,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2522,7 +2522,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_no_init_container.golden 
b/cli/cmd/testdata/install_no_init_container.golden index 9633a56c05e45..4cc51647d8b86 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1069,7 +1069,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1255,7 +1255,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1420,7 +1420,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1489,7 +1489,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1652,7 +1652,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1858,7 +1858,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2046,7 +2046,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index e8e82421e187a..44489670142a4 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - -destination-addr=linkerd-dst.Namespace.svc.cluster.local:8086 - -controller-namespace=Namespace - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1322,7 +1322,7 @@ spec: - -addr=:8086 - -controller-namespace=Namespace - -enable-h2-upgrade=true - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1519,7 +1519,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" - "-controller-namespace=Namespace" - - "-log-level=" + - "-log-level=ControllerLogLevel" 
securityContext: runAsUser: 2103 --- @@ -1588,7 +1588,7 @@ spec: - -api-addr=linkerd-controller-api.Namespace.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.Namespace.svc.cluster.local:3000 - -controller-namespace=Namespace - - -log-level= + - -log-level=ControllerLogLevel - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.Namespace\.svc\.cluster\.local|linkerd-web\.Namespace\.svc|\[::1\])(:\d+)?$ image: WebImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy @@ -1783,7 +1783,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -2021,7 +2021,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -2241,7 +2241,7 @@ spec: - args: - tap - -controller-namespace=Namespace - - -log-level= + - -log-level=ControllerLogLevel image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 84b73be7f86d4..838ae8a9fecfa 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index fa3d61f3d84bd..de55a8cf0dc9b 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 03a9d1a102ab6..16211a3f3f590 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -813,7 +813,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1037,7 +1037,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1256,7 +1256,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1454,7 +1454,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - 
"-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1523,7 +1523,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1719,7 +1719,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1958,7 +1958,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2179,7 +2179,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index e1b37e8a488b2..509855f1cca8a 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1592,7 +1592,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1788,7 +1788,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2027,7 +2027,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2248,7 +2248,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 68c05c850b94b..110d2c72e32f2 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1592,7 +1592,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1788,7 +1788,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2027,7 +2027,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -2248,7 +2248,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index b0086b1ce62ff..521e583e2056c 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -92,7 +92,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -318,7 +318,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -539,7 +539,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -739,7 +739,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - 
"-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -809,7 +809,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1007,7 +1007,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1248,7 +1248,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1471,7 +1471,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 34f84123ac3e7..dc9c1f2a6f161 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1598,7 +1598,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1796,7 +1796,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2037,7 +2037,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2260,7 +2260,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent 
livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index ae93d11de5dc0..ca3c26f23901d 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1598,7 +1598,7 @@ spec: - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -jaeger-addr=linkerd-jaeger.linkerd.svc.cluster.local:16686 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1796,7 +1796,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2037,7 +2037,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2260,7 +2260,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 329a95918898d..fcaf53d34e301 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - 
"-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 93f5d3140e17b..af8aaf948163c 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -867,7 +867,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1093,7 +1093,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1314,7 +1314,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1514,7 +1514,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1583,7 +1583,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1781,7 +1781,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2022,7 +2022,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2245,7 +2245,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index ced760d17e001..db189aea983ed 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index fc4c8d3a98c7b..0f0a73627dc33 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1325,7 +1325,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1525,7 +1525,7 @@ spec: - "heartbeat" - 
"-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1593,7 +1593,7 @@ spec: - args: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1791,7 +1791,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2032,7 +2032,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2255,7 +2255,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index ced760d17e001..db189aea983ed 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: 
IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index fc4c8d3a98c7b..0f0a73627dc33 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -878,7 +878,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1104,7 +1104,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1325,7 +1325,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1525,7 +1525,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1593,7 +1593,7 @@ spec: - args: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1791,7 +1791,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2032,7 +2032,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2255,7 +2255,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 2f18417dadd63..a196939e6809b 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - 
"-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana-overwrite.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index cdfce8db970bb..5a37f363d3549 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -904,7 +904,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1166,7 +1166,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1423,7 +1423,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1636,7 +1636,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" resources: limits: cpu: "1" @@ -1712,7 +1712,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1946,7 +1946,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2223,7 +2223,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2482,7 +2482,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index ea5d3e18f91d3..2f9b8ba77fb78 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index ced760d17e001..db189aea983ed 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - 
"-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index b84e006443b61..fdd3ba7e12723 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent 
livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index f64e51809c045..e34cfde8840b2 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -867,7 +867,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1091,7 +1091,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1310,7 +1310,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1508,7 +1508,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1577,7 +1577,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1773,7 +1773,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2012,7 +2012,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2233,7 +2233,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index b84e006443b61..fdd3ba7e12723 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1105,7 +1105,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1324,7 +1324,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: 
IfNotPresent livenessProbe: @@ -1522,7 +1522,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1591,7 +1591,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1787,7 +1787,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2026,7 +2026,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2247,7 +2247,7 @@ spec: - args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 151f6c7447300..2a80c86972c04 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -881,7 +881,7 @@ spec: containers: - args: - identity - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1107,7 +1107,7 @@ spec: - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1328,7 +1328,7 @@ spec: - -addr=:8086 - -controller-namespace=linkerd - -enable-h2-upgrade=true - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1528,7 +1528,7 @@ spec: - "heartbeat" - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - - "-log-level=" + - "-log-level=info" securityContext: runAsUser: 2103 --- @@ -1597,7 +1597,7 @@ spec: - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085 - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000 - -controller-namespace=linkerd - - -log-level= + - -log-level=info - -enforced-host=^(localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$ image: gcr.io/linkerd-io/web:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent @@ -1795,7 +1795,7 @@ spec: containers: - args: - proxy-injector - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2036,7 +2036,7 @@ spec: containers: - args: - sp-validator - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -2259,7 +2259,7 @@ spec: - 
args: - tap - -controller-namespace=linkerd - - -log-level= + - -log-level=info image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: From 2edea90b395bbdad9f202b841a7d35e933555020 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 22 Jun 2020 07:41:25 +0000 Subject: [PATCH 30/42] move default values to add-ons/values.yaml Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 55 +---------- charts/add-ons/prometheus/values.yaml | 54 +++++++++++ cli/cmd/install.go | 11 ++- .../install_addon_control-plane.golden | 94 +++++++++---------- cli/cmd/testdata/install_control-plane.golden | 94 +++++++++---------- ...install_controlplane_tracing_output.golden | 94 +++++++++---------- .../testdata/install_custom_registry.golden | 94 +++++++++---------- cli/cmd/testdata/install_default.golden | 94 +++++++++---------- ...stall_default_override_dst_get_nets.golden | 94 +++++++++---------- .../testdata/install_grafana_existing.golden | 94 +++++++++---------- cli/cmd/testdata/install_ha_output.golden | 94 +++++++++---------- .../install_ha_with_overrides_output.golden | 94 +++++++++---------- .../install_heartbeat_disabled_output.golden | 94 +++++++++---------- cli/cmd/testdata/install_helm_output.golden | 51 ---------- .../install_helm_output_addons.golden | 51 ---------- .../testdata/install_helm_output_ha.golden | 51 ---------- .../testdata/install_no_init_container.golden | 94 +++++++++---------- cli/cmd/testdata/install_output.golden | 94 +++++++++---------- .../install_prometheus_overwrite.golden | 60 ++---------- cli/cmd/testdata/install_proxy_ignores.golden | 94 +++++++++---------- .../install_restricted_dashboard.golden | 94 +++++++++---------- cli/cmd/testdata/install_tracing.golden | 94 +++++++++---------- .../testdata/install_tracing_overwrite.golden | 94 +++++++++---------- cli/cmd/testdata/prom-config.yaml | 4 +- .../upgrade_add-on_controlplane.golden | 94 +++++++++---------- .../testdata/upgrade_add-on_overwrite.golden | 94 +++++++++---------- cli/cmd/testdata/upgrade_add_add-on.golden | 94 +++++++++---------- cli/cmd/testdata/upgrade_default.golden | 94 +++++++++---------- .../testdata/upgrade_external_issuer.golden | 94 +++++++++---------- .../upgrade_grafana_addon_overwrite.yaml | 94 +++++++++---------- .../testdata/upgrade_grafana_disabled.yaml | 94 +++++++++---------- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 94 +++++++++---------- .../upgrade_grafana_enabled_disabled.yaml | 94 +++++++++---------- .../testdata/upgrade_grafana_overwrite.yaml | 94 +++++++++---------- cli/cmd/testdata/upgrade_ha.golden | 94 +++++++++---------- .../upgrade_keep_webhook_cabundle.golden | 94 +++++++++---------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 94 +++++++++---------- .../testdata/upgrade_overwrite_issuer.golden | 94 +++++++++---------- ...write_trust_anchors-external-issuer.golden | 94 +++++++++---------- .../upgrade_overwrite_trust_anchors.golden | 94 +++++++++---------- .../upgrade_two_level_webhook_cert.golden | 94 +++++++++---------- 41 files changed, 1623 insertions(+), 1816 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 9cb84b75ceaa8..634fb2047d84e 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -16,9 +16,6 @@ metadata: data: prometheus.yml: |- global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s {{- if 
.Values.globalConfig -}} {{- toYaml .Values.globalConfig | trim | nindent 6 }} {{- end}} @@ -28,10 +25,6 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -43,33 +36,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -85,19 +51,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -151,8 +104,8 @@ data: regex: __tmp_pod_label_(.+) {{- if .Values.scrapeConfigs }} - {{ toYaml .Values.scrapeConfigs | trim | nindent 4 }} - {{ end }} + {{- toYaml .Values.scrapeConfigs | trim | nindent 4 }} + {{- end }} {{- if (or .Values.alertManagers .Values.alertRelabelConfigs) }} alerting: @@ -227,10 +180,6 @@ spec: {{- include "linkerd.node-selector" . 
| nindent 6 }} containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level={{lower (default .Values.global.controllerLogLevel .Values.logLevel)}} {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml index e69de29bb2d1d..9cfdcba518591 100644 --- a/charts/add-ons/prometheus/values.yaml +++ b/charts/add-ons/prometheus/values.yaml @@ -0,0 +1,54 @@ +image: prom/prometheus:v2.15.2 +args: + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h + config.file: /etc/prometheus/prometheus.yml + log.level: debug +globalConfig: + scrape_interval: 10s + scrape_timeout: 10s + evaluation_interval: 10s +scrapeConfigs: +- job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + +# Required for: https://grafana.com/grafana/dashboards/315 +- job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop + +- job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component \ No newline at end of file diff --git a/cli/cmd/install.go b/cli/cmd/install.go index 8a94310c9e08e..e9e657e01012f 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -827,9 +827,14 @@ func render(w io.Writer, values *l5dcharts.Values) error { Dir: addOnChartsPath + "/" + addOn.Name(), Namespace: controlPlaneNamespace, RawValues: append(addOn.Values(), rawValues...), - Files: []*chartutil.BufferedFile{&chartutil.BufferedFile{ - Name: chartutil.ChartfileName, - }}, + Files: []*chartutil.BufferedFile{ + { + Name: chartutil.ChartfileName, + }, + { + Name: chartutil.ValuesfileName, + }, + }, } } diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index f81a44dc3000c..c15a92cac010f 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1963,19 +1963,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1987,33 +1983,6 @@ data: action: keep regex: ^grafana$ - # Required for: 
https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2029,19 +1998,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2093,6 +2049,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2149,10 +2149,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index 67414c32846f9..aa5128e67e772 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1956,19 +1956,15 @@ metadata: data: prometheus.yml: |- 
global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1980,33 +1976,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2022,19 +1991,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2086,6 +2042,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2142,10 +2142,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - 
--log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index a878d84319b70..baa14964eabb3 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -2916,19 +2916,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2940,33 +2936,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2982,19 +2951,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3046,6 +3002,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3102,10 +3102,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index a0ceb06f48ab0..5c93c53a72852 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: 
/api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 9ea992be4eddb..4ca081f4d6ebd 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - 
action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index bb0486ee1be61..579ed6b22ef69 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: 
replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 8f6b38532aa48..c2d25ca5ed309 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -2491,19 +2491,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2515,33 +2511,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2557,19 
+2526,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2621,6 +2577,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2677,10 +2677,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 7fabff66e7c77..2b04b8d7e2a9c 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -3062,19 +3062,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3086,33 +3082,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: 
__metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3128,19 +3097,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3192,6 +3148,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3248,10 +3248,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 80e6ae5be7bc6..13471e3010e4f 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -3062,19 +3062,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3086,33 +3082,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - 
tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3128,19 +3097,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3192,6 +3148,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3248,10 +3248,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index da2286161af38..c46b9056402fa 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2710,19 +2710,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s 
scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2734,33 +2730,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2776,19 +2745,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2840,6 +2796,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2896,10 +2896,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 
imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 89ddfcfb58fae..85fd636d849c6 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2845,19 +2845,12 @@ metadata: data: prometheus.yml: |- global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2869,33 +2862,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2911,19 +2877,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3031,10 +2984,6 @@ spec: null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 209dc71f1b945..3741f97a20e89 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2852,19 +2852,12 @@ metadata: data: prometheus.yml: |- global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2876,33 +2869,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2918,19 +2884,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3038,10 +2991,6 @@ spec: null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 71b1b27b19a94..1754bdbac58f5 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -3108,19 +3108,12 @@ metadata: data: prometheus.yml: |- global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3132,33 +3125,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3174,19 +3140,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3294,10 +3247,6 @@ spec: 
null containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 4cc51647d8b86..c7ceb09bb59e6 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2532,19 +2532,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2556,33 +2552,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2598,19 +2567,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2662,6 +2618,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: 
linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2718,10 +2718,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 44489670142a4..25ea8b9aca55d 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2792,19 +2792,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2816,33 +2812,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2858,19 +2827,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2922,6 +2878,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + 
replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2978,10 +2978,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=controllerloglevel image: PrometheusImage imagePullPolicy: ImagePullPolicy livenessProbe: diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 838ae8a9fecfa..9fd68813863fa 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2450,6 +2450,7 @@ data: evaluation_interval: 2m external_labels: cluster: cluster-1 + image: linkedin.io/prom remoteWrite: - url: http://cortex-service.default:9009/api/prom/push ruleConfigMapMounts: @@ -2833,22 +2834,17 @@ metadata: data: prometheus.yml: |- global: - scrape_interval: 10s - scrape_timeout: 10s - evaluation_interval: 10s evaluation_interval: 2m external_labels: cluster: cluster-1 + scrape_interval: 10s + scrape_timeout: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2860,33 +2856,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2902,19 +2871,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ 
-2966,7 +2922,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token job_name: kubernetes-nodes kubernetes_sd_configs: @@ -2977,7 +2932,6 @@ data: scheme: https tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - alerting: alert_relabel_configs: - action: labeldrop @@ -3045,12 +2999,12 @@ spec: beta.kubernetes.io/os: linux containers: - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - --log.format=json - image: prom/prometheus:v2.15.2 + - --log.level=debug + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h + image: linkedin.io/prom imagePullPolicy: IfNotPresent livenessProbe: httpGet: diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index de55a8cf0dc9b..def8d10c9f4e9 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + 
source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 16211a3f3f590..2789afafcac14 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2731,19 +2731,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2755,33 +2751,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2797,19 +2766,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2861,6 +2817,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: 
__tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2917,10 +2917,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 509855f1cca8a..8ea1e9451aead 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2806,19 +2806,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2830,33 +2826,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2872,19 +2841,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2936,6 +2892,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2992,10 +2992,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 110d2c72e32f2..907896954ca90 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2806,19 +2806,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2830,33 +2826,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: 
'(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2872,19 +2841,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2936,6 +2892,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2992,10 +2992,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/prom-config.yaml b/cli/cmd/testdata/prom-config.yaml index 521d821656be2..cc4257c57cf24 100644 --- a/cli/cmd/testdata/prom-config.yaml +++ b/cli/cmd/testdata/prom-config.yaml @@ -1,7 +1,5 @@ -global: - prometheusUrl: http://cortex-service.default:9009/api/prom - prometheus: + image: linkedin.io/prom args: log.format: json globalConfig: diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 521e583e2056c..25d0b2eb08250 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1979,19 +1979,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' 
kubernetes_sd_configs: - role: pod @@ -2003,33 +1999,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2045,19 +2014,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2109,6 +2065,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2165,10 +2165,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index dc9c1f2a6f161..2039f67c659cb 100644 --- 
a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2824,19 +2824,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2848,33 +2844,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2890,19 +2859,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2954,6 +2910,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3010,10 +3010,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - 
--config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index ca3c26f23901d..0d272fb3872a2 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2822,19 +2822,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2846,33 +2842,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2888,19 +2857,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2952,6 +2908,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + 
regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3008,10 +3008,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index fcaf53d34e301..9abc19b76f552 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: 
(.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index af8aaf948163c..b9bdc1c4878c5 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2801,19 +2801,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2825,33 +2821,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2867,19 +2836,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2931,6 +2887,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + 
kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2987,10 +2987,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index db189aea983ed..04bed49cc94f9 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: 
[__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index 0f0a73627dc33..c13d6eadad2ff 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2504,19 +2504,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2528,33 +2524,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' 
kubernetes_sd_configs: - role: pod @@ -2570,19 +2539,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2634,6 +2590,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2690,10 +2690,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index db189aea983ed..04bed49cc94f9 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: 
[__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index 0f0a73627dc33..c13d6eadad2ff 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2504,19 +2504,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2528,33 +2524,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 
'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2570,19 +2539,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2634,6 +2590,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2690,10 +2690,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index a196939e6809b..7b4bbfd91c393 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 
10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 
imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 5a37f363d3549..6aaa0de2610c0 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -3078,19 +3078,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3102,33 +3098,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3144,19 +3113,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3208,6 +3164,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: 
component --- kind: Service apiVersion: v1 @@ -3264,10 +3264,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 2f9b8ba77fb78..1bf382d36e356 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index db189aea983ed..04bed49cc94f9 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: 
container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index fdd3ba7e12723..7023bcef515a5 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap 
regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index e34cfde8840b2..1f972a310430d 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2785,19 +2785,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2809,33 +2805,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2851,19 +2820,6 @@ data: action: replace target_label: component - - job_name: 
'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2915,6 +2871,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2971,10 +2971,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index fdd3ba7e12723..7023bcef515a5 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2799,19 +2799,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,33 +2819,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - 
replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2865,19 +2834,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2929,6 +2885,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -2985,10 +2985,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 2a80c86972c04..257ed149fcd45 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -2815,19 +2815,15 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s rule_files: - /etc/prometheus/*_rules.yml - /etc/prometheus/*_rules.yaml scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2839,33 +2835,6 @@ data: action: keep regex: ^grafana$ - # Required for: https://grafana.com/grafana/dashboards/315 - - job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2881,19 +2850,6 @@ data: action: replace target_label: component - - job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component - - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2945,6 +2901,50 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + job_name: kubernetes-nodes-cadvisor + kubernetes_sd_configs: + - role: node + metric_relabel_configs: + - action: keep + regex: (container|machine)_(cpu|memory|network|fs)_(.+) + source_labels: + - __name__ + - action: drop + regex: container_memory_failures_total + source_labels: + - __name__ + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - replacement: kubernetes.default.svc:443 + target_label: __address__ + - regex: (.+) + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + source_labels: + - __meta_kubernetes_node_name + target_label: __metrics_path__ + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - job_name: linkerd-service-mirror + kubernetes_sd_configs: + - role: pod + relabel_configs: + - action: keep + regex: linkerd-service-mirror;admin-http$ + source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: component --- kind: Service apiVersion: v1 @@ -3001,10 +3001,10 @@ spec: beta.kubernetes.io/os: linux containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=debug - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: From d318efecebf7b4e76590c01f4f165d95681fee7d Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 23 Jun 2020 06:52:32 +0000 Subject: [PATCH 31/42] nits around readme, requirements ordering and prom log default Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/values.yaml | 2 +- charts/linkerd2/README.md | 2 +- charts/linkerd2/requirements.yaml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git 
a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml
index 9cfdcba518591..701b97f3fc6b2 100644
--- a/charts/add-ons/prometheus/values.yaml
+++ b/charts/add-ons/prometheus/values.yaml
@@ -3,7 +3,7 @@ args:
   storage.tsdb.path: /data
   storage.tsdb.retention.time: 6h
   config.file: /etc/prometheus/prometheus.yml
-  log.level: debug
+  log.level: info
 globalConfig:
   scrape_interval: 10s
   scrape_timeout: 10s
diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md
index f5dc5346be884..e5d6da964f212 100644
--- a/charts/linkerd2/README.md
+++ b/charts/linkerd2/README.md
@@ -192,10 +192,10 @@ The following table lists the configurable parameters for the Prometheus Add-On.
 | Parameter | Description | Default |
 |:--------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------|
+| `prometheus.enabled` | Flag to enable prometheus instance to be installed | `true` |
 | `prometheus.alert_relabel_configs` | Alert relabeling is applied to alerts before they are sent to the Alertmanager. | `[]` |
 | `prometheus.alertManagers` | Alertmanager instances the Prometheus server sends alerts to configured via the static_configs parameter. | `[]` |
 | `prometheus.args` | Command line options for Prometheus binary | `storage.tsdb.path: /data, storage.tsdb.retention.time: 6h, config.file: /etc/prometheus/prometheus.yml, log.level: *controller_log_level` |
-| `prometheus.enabled` | Flag to enable prometheus instance to be installed | `true` |
 | `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` |
 | `prometheus.image` | Docker image for the prometheus instance | `prom/prometheus:v2.15.2` |
 | `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use ||
diff --git a/charts/linkerd2/requirements.yaml b/charts/linkerd2/requirements.yaml
index 1795b70bad958..5f2d1aabd5c7d 100644
--- a/charts/linkerd2/requirements.yaml
+++ b/charts/linkerd2/requirements.yaml
@@ -1,11 +1,11 @@
 dependencies:
+- name: partials
+  version: 0.1.0
+  repository: file://../partials
 - name: prometheus
   version: 0.1.0
   repository: file://../add-ons/prometheus
   condition: prometheus.enabled
-- name: partials
-  version: 0.1.0
-  repository: file://../partials
 - name: grafana
   version: 0.1.0
   repository: file://../add-ons/grafana

From d03b3b33c305b87b27c373aa3b5a1a2f3fd4e7de Mon Sep 17 00:00:00 2001
From: Tarun Pothulapati
Date: Tue, 23 Jun 2020 07:42:00 +0000
Subject: [PATCH 32/42] update golden files

Signed-off-by: Tarun Pothulapati
---
 cli/cmd/testdata/install_addon_control-plane.golden | 2 +-
 cli/cmd/testdata/install_control-plane.golden | 2 +-
 cli/cmd/testdata/install_controlplane_tracing_output.golden | 2 +-
 cli/cmd/testdata/install_custom_registry.golden | 2 +-
 cli/cmd/testdata/install_default.golden | 2 +-
 cli/cmd/testdata/install_default_override_dst_get_nets.golden | 2 +-
 cli/cmd/testdata/install_grafana_existing.golden | 2 +-
 cli/cmd/testdata/install_ha_output.golden | 2 +-
 cli/cmd/testdata/install_ha_with_overrides_output.golden | 2 +-
 cli/cmd/testdata/install_heartbeat_disabled_output.golden | 2 +-
 cli/cmd/testdata/install_no_init_container.golden | 2 +-
 cli/cmd/testdata/install_output.golden | 2 +-
 cli/cmd/testdata/install_prometheus_overwrite.golden | 2 +-
 cli/cmd/testdata/install_proxy_ignores.golden | 2 +-
 cli/cmd/testdata/install_restricted_dashboard.golden | 2 +-
 cli/cmd/testdata/install_tracing.golden | 2 +-
 cli/cmd/testdata/install_tracing_overwrite.golden | 2 +-
 cli/cmd/testdata/upgrade_add-on_controlplane.golden | 2 +-
 cli/cmd/testdata/upgrade_add-on_overwrite.golden | 2 +-
 cli/cmd/testdata/upgrade_add_add-on.golden | 2 +-
 cli/cmd/testdata/upgrade_default.golden | 2 +-
 cli/cmd/testdata/upgrade_external_issuer.golden | 2 +-
 cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml | 2 +-
 cli/cmd/testdata/upgrade_grafana_disabled.yaml | 2 +-
 cli/cmd/testdata/upgrade_grafana_enabled.yaml | 2 +-
 cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml | 2 +-
 cli/cmd/testdata/upgrade_grafana_overwrite.yaml | 2 +-
 cli/cmd/testdata/upgrade_ha.golden | 2 +-
 cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden | 2 +-
 cli/cmd/testdata/upgrade_nothing_addon.yaml | 2 +-
 cli/cmd/testdata/upgrade_overwrite_issuer.golden | 2 +-
 .../upgrade_overwrite_trust_anchors-external-issuer.golden | 2 +-
 cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden | 2 +-
 cli/cmd/testdata/upgrade_two_level_webhook_cert.golden | 2 +-
 34 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden
index c15a92cac010f..689b038075efe 100644
--- a/cli/cmd/testdata/install_addon_control-plane.golden
+++ b/cli/cmd/testdata/install_addon_control-plane.golden
@@ -2150,7 +2150,7 @@ spec:
       containers:
       - args:
         - --config.file=/etc/prometheus/prometheus.yml
-        - --log.level=debug
+        - --log.level=info
         - --storage.tsdb.path=/data
         - --storage.tsdb.retention.time=6h
         image: prom/prometheus:v2.15.2
diff --git
a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index aa5128e67e772..d267c5d3cbe73 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -2143,7 +2143,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index baa14964eabb3..416082736c12c 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -3103,7 +3103,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 5c93c53a72852..30388e7763602 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 4ca081f4d6ebd..44035418c903d 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 579ed6b22ef69..81f3ddab88af4 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index c2d25ca5ed309..d2daba928f7b0 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -2678,7 +2678,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 2b04b8d7e2a9c..d4c10e651c5bf 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -3249,7 +3249,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden 
b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 13471e3010e4f..c8bf56698a79f 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -3249,7 +3249,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index c46b9056402fa..05c33c982f14d 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2897,7 +2897,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index c7ceb09bb59e6..55840049aad05 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2719,7 +2719,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 25ea8b9aca55d..f9415fb71cc8b 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2979,7 +2979,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: PrometheusImage diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 9fd68813863fa..658f8376a58a0 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -3001,7 +3001,7 @@ spec: - args: - --config.file=/etc/prometheus/prometheus.yml - --log.format=json - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: linkedin.io/prom diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index def8d10c9f4e9..a3b453454695a 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 2789afafcac14..85e7d8fe246a0 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2918,7 +2918,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 
8ea1e9451aead..ab5337f30a270 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2993,7 +2993,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 907896954ca90..4b575dc00a368 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2993,7 +2993,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 25d0b2eb08250..3786aa5d64c34 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -2166,7 +2166,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 2039f67c659cb..8d1941eade296 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -3011,7 +3011,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 0d272fb3872a2..6e4155c447dac 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -3009,7 +3009,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 9abc19b76f552..1c2ff494dd714 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index b9bdc1c4878c5..799d27e679b61 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2988,7 +2988,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 04bed49cc94f9..1e5be061fcb47 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ 
b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index c13d6eadad2ff..81ca5e0453d9f 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2691,7 +2691,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 04bed49cc94f9..1e5be061fcb47 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index c13d6eadad2ff..81ca5e0453d9f 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2691,7 +2691,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 7b4bbfd91c393..ec287a6b83944 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 6aaa0de2610c0..2f011cccf0c5b 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -3265,7 +3265,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 1bf382d36e356..230078a4b07f3 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 04bed49cc94f9..1e5be061fcb47 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -3002,7 +3002,7 @@ spec: containers: - args: - 
--config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 7023bcef515a5..4ee0df92de1b9 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 1f972a310430d..1e319c927c40c 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2972,7 +2972,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 7023bcef515a5..4ee0df92de1b9 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2986,7 +2986,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 257ed149fcd45..f5528e0faab9a 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -3002,7 +3002,7 @@ spec: containers: - args: - --config.file=/etc/prometheus/prometheus.yml - - --log.level=debug + - --log.level=info - --storage.tsdb.path=/data - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 From f5fc848f1ce93c7e452e1e3ff87937baf39457d9 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 24 Jun 2020 11:16:21 +0000 Subject: [PATCH 33/42] move scrapeconfigs to templates Signed-off-by: Tarun Pothulapati --- .../prometheus/templates/prometheus.yaml | 42 ++++++++- charts/add-ons/prometheus/values.yaml | 46 +--------- charts/linkerd2/requirements.lock | 12 +-- .../install_addon_control-plane.golden | 86 +++++++++---------- cli/cmd/testdata/install_control-plane.golden | 86 +++++++++---------- ...install_controlplane_tracing_output.golden | 86 +++++++++---------- .../testdata/install_custom_registry.golden | 86 +++++++++---------- cli/cmd/testdata/install_default.golden | 86 +++++++++---------- ...stall_default_override_dst_get_nets.golden | 86 +++++++++---------- .../testdata/install_grafana_existing.golden | 86 +++++++++---------- cli/cmd/testdata/install_ha_output.golden | 86 +++++++++---------- .../install_ha_with_overrides_output.golden | 86 +++++++++---------- .../install_heartbeat_disabled_output.golden | 86 +++++++++---------- cli/cmd/testdata/install_helm_output.golden | 42 ++++++++- .../install_helm_output_addons.golden | 42 ++++++++- .../testdata/install_helm_output_ha.golden | 42 ++++++++- 
.../testdata/install_no_init_container.golden | 86 +++++++++---------- cli/cmd/testdata/install_output.golden | 86 +++++++++---------- .../install_prometheus_overwrite.golden | 42 ++++++++- cli/cmd/testdata/install_proxy_ignores.golden | 86 +++++++++---------- .../install_restricted_dashboard.golden | 86 +++++++++---------- cli/cmd/testdata/install_tracing.golden | 86 +++++++++---------- .../testdata/install_tracing_overwrite.golden | 86 +++++++++---------- .../upgrade_add-on_controlplane.golden | 86 +++++++++---------- .../testdata/upgrade_add-on_overwrite.golden | 86 +++++++++---------- cli/cmd/testdata/upgrade_add_add-on.golden | 86 +++++++++---------- cli/cmd/testdata/upgrade_default.golden | 86 +++++++++---------- .../testdata/upgrade_external_issuer.golden | 86 +++++++++---------- .../upgrade_grafana_addon_overwrite.yaml | 86 +++++++++---------- .../testdata/upgrade_grafana_disabled.yaml | 86 +++++++++---------- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 86 +++++++++---------- .../upgrade_grafana_enabled_disabled.yaml | 86 +++++++++---------- .../testdata/upgrade_grafana_overwrite.yaml | 86 +++++++++---------- cli/cmd/testdata/upgrade_ha.golden | 86 +++++++++---------- .../upgrade_keep_webhook_cabundle.golden | 86 +++++++++---------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 86 +++++++++---------- .../testdata/upgrade_overwrite_issuer.golden | 86 +++++++++---------- ...write_trust_anchors-external-issuer.golden | 86 +++++++++---------- .../upgrade_overwrite_trust_anchors.golden | 86 +++++++++---------- .../upgrade_two_level_webhook_cert.golden | 86 +++++++++---------- 40 files changed, 1527 insertions(+), 1579 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 634fb2047d84e..3dc9466adee74 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -25,6 +25,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -35,7 +38,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -50,7 +77,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + 
action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml index 701b97f3fc6b2..d3d60d2f2b6ba 100644 --- a/charts/add-ons/prometheus/values.yaml +++ b/charts/add-ons/prometheus/values.yaml @@ -7,48 +7,4 @@ args: globalConfig: scrape_interval: 10s scrape_timeout: 10s - evaluation_interval: 10s -scrapeConfigs: -- job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - -# Required for: https://grafana.com/grafana/dashboards/315 -- job_name: 'kubernetes-nodes-cadvisor' - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - metric_relabel_configs: - - source_labels: [__name__] - regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' - action: keep - - source_labels: [__name__] - regex: 'container_memory_failures_total' # unneeded large metric - action: drop - -- job_name: 'linkerd-service-mirror' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - action: keep - regex: linkerd-service-mirror;admin-http$ - - source_labels: [__meta_kubernetes_pod_container_name] - action: replace - target_label: component \ No newline at end of file + evaluation_interval: 10s \ No newline at end of file diff --git a/charts/linkerd2/requirements.lock b/charts/linkerd2/requirements.lock index 664e1ccdf778a..ee33f734d5496 100644 --- a/charts/linkerd2/requirements.lock +++ b/charts/linkerd2/requirements.lock @@ -1,15 +1,15 @@ dependencies: -- name: grafana - repository: file://../add-ons/grafana +- name: partials + repository: file://../partials version: 0.1.0 - name: prometheus repository: file://../add-ons/prometheus version: 0.1.0 -- name: partials - repository: file://../partials +- name: grafana + repository: file://../add-ons/grafana version: 0.1.0 - name: tracing repository: file://../add-ons/tracing version: 0.1.0 -digest: sha256:b650dc0a30d65e4bbbff0c41e5b643fb96d0660cae658f185a1df5414602e306 -generated: "2020-05-08T13:09:25.498949694+05:30" +digest: sha256:d2428770ae7d5134c5af6521c78a4c5f95da4c75f21bdea0f74fad6ab6e2e044 +generated: "2020-06-24T11:07:53.924602129Z" diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 689b038075efe..f962b660e9a23 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1972,6 +1972,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1982,7 +1985,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 
'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -1997,7 +2024,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2049,50 +2087,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index d267c5d3cbe73..f269d0e521df5 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1965,6 +1965,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1975,7 +1978,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -1990,7 +2017,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2042,50 +2080,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 416082736c12c..8baedd05e41fa 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -2925,6 +2925,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2935,7 +2938,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + 
scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2950,7 +2977,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3002,50 +3040,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 30388e7763602..45a8ee6524391 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + 
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 44035418c903d..7813ab075e8b7 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 81f3ddab88af4..68a0b913ea1f9 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index d2daba928f7b0..9d67a1503198c 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -2500,6 +2500,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2510,7 +2513,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2525,7 +2552,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2577,50 +2615,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index d4c10e651c5bf..739a4c6fea6f6 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -3071,6 +3071,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3081,7 +3084,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3096,7 +3123,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3148,50 +3186,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index c8bf56698a79f..3080d3a75ffd8 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -3071,6 +3071,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3081,7 +3084,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3096,7 +3123,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3148,50 +3186,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 05c33c982f14d..adee2618d75ef 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2719,6 +2719,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2729,7 +2732,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2744,7 +2771,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2796,50 +2834,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 85fd636d849c6..0fb045717f84a 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2851,6 +2851,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2861,7 +2864,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 
+ insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,7 +2903,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 3741f97a20e89..6d6b6164d0f33 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2858,6 +2858,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2868,7 +2871,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2883,7 +2910,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_helm_output_ha.golden 
b/cli/cmd/testdata/install_helm_output_ha.golden index 1754bdbac58f5..db52ee80bedde 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -3114,6 +3114,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3124,7 +3127,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3139,7 +3166,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 55840049aad05..d87adecb9ffcf 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2541,6 +2541,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2551,7 +2554,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' 
kubernetes_sd_configs: - role: pod @@ -2566,7 +2593,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2618,50 +2656,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index f9415fb71cc8b..fe4d790765e37 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2801,6 +2801,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2811,7 +2814,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2826,7 +2853,18 @@ data: - 
source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2878,50 +2916,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 658f8376a58a0..c61955ce7075f 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2845,6 +2845,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2855,7 +2858,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2870,7 +2897,18 @@ data: - source_labels: 
[__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index a3b453454695a..e9a95e8a454c6 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 85e7d8fe246a0..1a97cf386fac2 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2740,6 +2740,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2750,7 +2753,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2765,7 +2792,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2817,50 +2855,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index ab5337f30a270..bfc453e9458ba 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2815,6 +2815,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2825,7 +2828,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2840,7 +2867,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2892,50 +2930,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - 
insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 4b575dc00a368..9d5d6954916fd 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2815,6 +2815,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2825,7 +2828,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2840,7 +2867,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2892,50 +2930,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - 
- job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 3786aa5d64c34..e5900b196a69e 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1988,6 +1988,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1998,7 +2001,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2013,7 +2040,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2065,50 +2103,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: 
linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 8d1941eade296..b7d0fb7cec67e 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2833,6 +2833,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2843,7 +2846,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2858,7 +2885,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2910,50 +2948,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - 
kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 6e4155c447dac..7f950169f6bae 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2831,6 +2831,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2841,7 +2844,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2856,7 +2883,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2908,50 +2946,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: 
- - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 1c2ff494dd714..5c905df5c919e 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - 
source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 799d27e679b61..a5ce611666663 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2810,6 +2810,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2820,7 +2823,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2835,7 +2862,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2887,50 +2925,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 1e5be061fcb47..6d9c113e7679e 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index 81ca5e0453d9f..eae727dc9001d 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2513,6 +2513,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2523,7 +2526,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2538,7 +2565,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2590,50 +2628,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 1e5be061fcb47..6d9c113e7679e 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index 81ca5e0453d9f..eae727dc9001d 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2513,6 +2513,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2523,7 +2526,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2538,7 +2565,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2590,50 +2628,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index ec287a6b83944..ae90835b3e9e7 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - 
__meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 2f011cccf0c5b..5164144347eff 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -3087,6 +3087,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3097,7 +3100,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3112,7 +3139,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -3164,50 +3202,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index 230078a4b07f3..e980792a561d7 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 1e5be061fcb47..6d9c113e7679e 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - 
action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 4ee0df92de1b9..1da533c84d776 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - __meta_kubernetes_pod_container_port_name - - action: replace - source_labels: 
- - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 1e319c927c40c..b26db07cc8b7e 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2794,6 +2794,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2804,7 +2807,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2819,7 +2846,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2871,50 +2909,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 4ee0df92de1b9..1da533c84d776 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2808,6 +2808,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2818,7 +2821,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2833,7 +2860,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2885,50 +2923,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index f5528e0faab9a..48a84ed18419a 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -2824,6 +2824,9 @@ data: - /etc/prometheus/*_rules.yaml scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2834,7 +2837,31 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ - + # Required for: https://grafana.com/grafana/dashboards/315 + - job_name: 'kubernetes-nodes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [__name__] + regex: '(container|machine)_(cpu|memory|network|fs)_(.+)' + action: keep + - source_labels: [__name__] + regex: 'container_memory_failures_total' # unneeded large metric + action: drop - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2849,7 +2876,18 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component - + - job_name: 'linkerd-service-mirror' + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_label_linkerd_io_control_plane_component + - __meta_kubernetes_pod_container_port_name + action: keep + regex: linkerd-service-mirror;admin-http$ + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: component - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod @@ -2901,50 +2939,6 @@ data: # Copy tmp labels into real labels - action: labelmap regex: __tmp_pod_label_(.+) - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: kubernetes-nodes-cadvisor - kubernetes_sd_configs: - - role: node - metric_relabel_configs: - - action: keep - regex: (container|machine)_(cpu|memory|network|fs)_(.+) - source_labels: - - __name__ - - action: drop - regex: container_memory_failures_total - source_labels: - - __name__ - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - - job_name: linkerd-service-mirror - kubernetes_sd_configs: - - role: pod - relabel_configs: - - action: keep - regex: linkerd-service-mirror;admin-http$ - source_labels: - - __meta_kubernetes_pod_label_linkerd_io_control_plane_component - - 
__meta_kubernetes_pod_container_port_name - - action: replace - source_labels: - - __meta_kubernetes_pod_container_name - target_label: component --- kind: Service apiVersion: v1 From 946f7f83bd3d45b7ef3677722fee578a194e42bd Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 24 Jun 2020 11:40:32 +0000 Subject: [PATCH 34/42] add correct scrape config indentation Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/templates/prometheus.yaml | 5 +++++ cli/cmd/testdata/install_addon_control-plane.golden | 5 +++++ cli/cmd/testdata/install_control-plane.golden | 5 +++++ cli/cmd/testdata/install_controlplane_tracing_output.golden | 5 +++++ cli/cmd/testdata/install_custom_registry.golden | 5 +++++ cli/cmd/testdata/install_default.golden | 5 +++++ .../testdata/install_default_override_dst_get_nets.golden | 5 +++++ cli/cmd/testdata/install_grafana_existing.golden | 5 +++++ cli/cmd/testdata/install_ha_output.golden | 5 +++++ cli/cmd/testdata/install_ha_with_overrides_output.golden | 5 +++++ cli/cmd/testdata/install_heartbeat_disabled_output.golden | 5 +++++ cli/cmd/testdata/install_helm_output.golden | 5 +++++ cli/cmd/testdata/install_helm_output_addons.golden | 5 +++++ cli/cmd/testdata/install_helm_output_ha.golden | 5 +++++ cli/cmd/testdata/install_no_init_container.golden | 5 +++++ cli/cmd/testdata/install_output.golden | 5 +++++ cli/cmd/testdata/install_prometheus_overwrite.golden | 5 +++++ cli/cmd/testdata/install_proxy_ignores.golden | 5 +++++ cli/cmd/testdata/install_restricted_dashboard.golden | 5 +++++ cli/cmd/testdata/install_tracing.golden | 5 +++++ cli/cmd/testdata/install_tracing_overwrite.golden | 5 +++++ cli/cmd/testdata/upgrade_add-on_controlplane.golden | 5 +++++ cli/cmd/testdata/upgrade_add-on_overwrite.golden | 5 +++++ cli/cmd/testdata/upgrade_add_add-on.golden | 5 +++++ cli/cmd/testdata/upgrade_default.golden | 5 +++++ cli/cmd/testdata/upgrade_external_issuer.golden | 5 +++++ cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml | 5 +++++ cli/cmd/testdata/upgrade_grafana_disabled.yaml | 5 +++++ cli/cmd/testdata/upgrade_grafana_enabled.yaml | 5 +++++ cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml | 5 +++++ cli/cmd/testdata/upgrade_grafana_overwrite.yaml | 5 +++++ cli/cmd/testdata/upgrade_ha.golden | 5 +++++ cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden | 5 +++++ cli/cmd/testdata/upgrade_nothing_addon.yaml | 5 +++++ cli/cmd/testdata/upgrade_overwrite_issuer.golden | 5 +++++ .../upgrade_overwrite_trust_anchors-external-issuer.golden | 5 +++++ cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden | 5 +++++ cli/cmd/testdata/upgrade_two_level_webhook_cert.golden | 5 +++++ 38 files changed, 190 insertions(+) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 3dc9466adee74..767bc76fab1cd 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -28,6 +28,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -38,6 +39,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -63,6 +65,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -77,6 +80,7 @@ 
data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -89,6 +93,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index f962b660e9a23..04f19c90feb59 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1975,6 +1975,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1985,6 +1986,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2010,6 +2012,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2024,6 +2027,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2036,6 +2040,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index f269d0e521df5..ac011d4f3981a 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1968,6 +1968,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -1978,6 +1979,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2003,6 +2005,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2017,6 +2020,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2029,6 +2033,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 8baedd05e41fa..0f7b5b4abe166 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -2928,6 +2928,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2938,6 +2939,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2963,6 +2965,7 @@ data: - source_labels: [__name__] regex: 
'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2977,6 +2980,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2989,6 +2993,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 45a8ee6524391..ffac6454b551f 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 7813ab075e8b7..d8f4a3ca59ddf 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 68a0b913ea1f9..684dfd723887f 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: 
https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 9d67a1503198c..4f314e1ad06f3 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -2503,6 +2503,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2513,6 +2514,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2538,6 +2540,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2552,6 +2555,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2564,6 +2568,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index 739a4c6fea6f6..a0b113c7a18eb 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -3074,6 +3074,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3084,6 +3085,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -3109,6 +3111,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3123,6 +3126,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -3135,6 +3139,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 3080d3a75ffd8..1a1da020c9f23 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -3074,6 +3074,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: 
- role: pod @@ -3084,6 +3085,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -3109,6 +3111,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3123,6 +3126,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -3135,6 +3139,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index adee2618d75ef..ebe5aa7c529b3 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -2722,6 +2722,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2732,6 +2733,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2757,6 +2759,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2771,6 +2774,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2783,6 +2787,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 0fb045717f84a..d3ae7d60a7dc3 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2854,6 +2854,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2864,6 +2865,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2889,6 +2891,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2903,6 +2906,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2915,6 +2919,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 6d6b6164d0f33..1172973184e42 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ 
b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2861,6 +2861,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2871,6 +2872,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2896,6 +2898,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2910,6 +2913,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2922,6 +2926,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index db52ee80bedde..213d4b9b63b38 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -3117,6 +3117,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3127,6 +3128,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -3152,6 +3154,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3166,6 +3169,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -3178,6 +3182,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index d87adecb9ffcf..a6dd92355101e 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -2544,6 +2544,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2554,6 +2555,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2579,6 +2581,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2593,6 +2596,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2605,6 +2609,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden 
index fe4d790765e37..a0f248f7b94f7 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -2804,6 +2804,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2814,6 +2815,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2839,6 +2841,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2853,6 +2856,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2865,6 +2869,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index c61955ce7075f..1ef6a105bffea 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -2848,6 +2848,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2858,6 +2859,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2883,6 +2885,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2897,6 +2900,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2909,6 +2913,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index e9a95e8a454c6..ab71520328a3e 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff 
--git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 1a97cf386fac2..8aa4eeed16ad3 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -2743,6 +2743,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2753,6 +2754,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2778,6 +2780,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2792,6 +2795,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2804,6 +2808,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index bfc453e9458ba..f4d5a57bf1368 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -2818,6 +2818,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2828,6 +2829,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2853,6 +2855,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2867,6 +2870,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2879,6 +2883,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 9d5d6954916fd..f1db3c3deeb31 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -2818,6 +2818,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2828,6 +2829,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2853,6 +2855,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2867,6 +2870,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2879,6 +2883,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] 
action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index e5900b196a69e..5b9c0d20ef4ad 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1991,6 +1991,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2001,6 +2002,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2026,6 +2028,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2040,6 +2043,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2052,6 +2056,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index b7d0fb7cec67e..896b154e50aaf 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -2836,6 +2836,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2846,6 +2847,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2871,6 +2873,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2885,6 +2888,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2897,6 +2901,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 7f950169f6bae..414ceafd3ed89 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -2834,6 +2834,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2844,6 +2845,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2869,6 +2871,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2883,6 +2886,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' 
kubernetes_sd_configs: - role: pod @@ -2895,6 +2899,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 5c905df5c919e..72017e80221af 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index a5ce611666663..63116450674cd 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -2813,6 +2813,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2823,6 +2824,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2848,6 +2850,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2862,6 +2865,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2874,6 +2878,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 6d9c113e7679e..a18438e42a633 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: 
[__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index eae727dc9001d..c72d50a0a4d85 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -2516,6 +2516,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2526,6 +2527,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2551,6 +2553,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2565,6 +2568,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2577,6 +2581,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 6d9c113e7679e..a18438e42a633 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index eae727dc9001d..c72d50a0a4d85 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -2516,6 +2516,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2526,6 +2527,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2551,6 +2553,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric 
action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2565,6 +2568,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2577,6 +2581,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index ae90835b3e9e7..7f4e2657566aa 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 5164144347eff..1b1302a18be50 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -3090,6 +3090,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -3100,6 +3101,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -3125,6 +3127,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -3139,6 +3142,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -3151,6 +3155,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index e980792a561d7..8d63726ed75a1 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - 
source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 6d9c113e7679e..a18438e42a633 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 1da533c84d776..db8f4175041ea 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index b26db07cc8b7e..f25fc267dbce0 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -2797,6 +2797,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2807,6 +2808,7 @@ data: - 
__meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2832,6 +2834,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2846,6 +2849,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2858,6 +2862,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 1da533c84d776..db8f4175041ea 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -2811,6 +2811,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2821,6 +2822,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2846,6 +2848,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2860,6 +2863,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2872,6 +2876,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 48a84ed18419a..ae4b95e9ff11b 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -2827,6 +2827,7 @@ data: - job_name: 'prometheus' static_configs: - targets: ['localhost:9090'] + - job_name: 'grafana' kubernetes_sd_configs: - role: pod @@ -2837,6 +2838,7 @@ data: - __meta_kubernetes_pod_container_name action: keep regex: ^grafana$ + # Required for: https://grafana.com/grafana/dashboards/315 - job_name: 'kubernetes-nodes-cadvisor' scheme: https @@ -2862,6 +2864,7 @@ data: - source_labels: [__name__] regex: 'container_memory_failures_total' # unneeded large metric action: drop + - job_name: 'linkerd-controller' kubernetes_sd_configs: - role: pod @@ -2876,6 +2879,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-service-mirror' kubernetes_sd_configs: - role: pod @@ -2888,6 +2892,7 @@ data: - source_labels: [__meta_kubernetes_pod_container_name] action: replace target_label: component + - job_name: 'linkerd-proxy' kubernetes_sd_configs: - role: pod From f6633739569eb241ef879488ef90544b68d2f536 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Thu, 25 Jun 2020 07:31:40 +0000 Subject: [PATCH 35/42] update prometheus add-on to include proxy resources along with tests Signed-off-by: Tarun Pothulapati --- 
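Note on the change below: this patch replaces the flat `prometheusProxyResources` value with the nested `prometheus.proxy.resources` key, which the template merges over `global.proxy.resources` before rendering the proxy partial. As a rough sketch (the figures are illustrative, borrowed from the upgrade test in this patch, and the sub-fields are assumed to mirror `global.proxy.resources`), a Helm values override for the parent linkerd2 chart could look like:

prometheus:
  proxy:
    resources:
      cpu:
        limit: 1050m
      memory:
        request: 105Mi

The equivalent command-line form, `--set prometheus.proxy.resources.cpu.limit=1050m` and `--set prometheus.proxy.resources.memory.request=105Mi`, is exercised in test/install_test.go below.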
charts/add-ons/prometheus/templates/prometheus.yaml | 6 ++++-- charts/linkerd2/README.md | 1 + charts/linkerd2/values.yaml | 2 ++ test/install_test.go | 4 ++-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index 3b639454cf3ce..853ee7d885c47 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -263,10 +263,12 @@ spec: subPath: prometheus.yml readOnly: true {{- $tree := deepCopy . }} - {{- if not (empty .Values.prometheusProxyResources) }} - {{- $r := merge .Values.prometheusProxyResources .Values.global.proxy.resources }} + {{- if not (empty .Values.proxy) }} + {{- if not (empty .Values.proxy.resources) }} + {{- $r := merge .Values.proxy.resources .Values.global.proxy.resources }} {{- $_ := set $tree.Values.global.proxy "resources" $r }} {{- end }} + {{- end }} - {{- include "partials.proxy" $tree | indent 8 | trimPrefix (repeat 7 " ") }} {{ if not .Values.global.cniEnabled -}} initContainers: diff --git a/charts/linkerd2/README.md b/charts/linkerd2/README.md index b4cd563e5f07a..bfb00a23fdf24 100644 --- a/charts/linkerd2/README.md +++ b/charts/linkerd2/README.md @@ -216,6 +216,7 @@ The following table lists the configurable parameters for the Prometheus Add-On. | `prometheus.args` | Command line options for Prometheus binary | `storage.tsdb.path: /data, storage.tsdb.retention.time: 6h, config.file: /etc/prometheus/prometheus.yml, log.level: *controller_log_level` | | `prometheus.globalConfig` | The global configuration specifies parameters that are valid in all other configuration contexts. | `scrape_interval: 10s, scrape_timeout: 10s, evaluation_interval: 10s` | | `prometheus.image` | Docker image for the prometheus instance | `prom/prometheus:v2.15.2` | +| `prometheus.proxy.resources` | CPU and Memory resources required by proxy injected into prometheus pod (see `global.proxy.resources` for sub-fields) | values in `global.proxy.resources` | | `prometheus.resources.cpu.limit` | Maximum amount of CPU units that the prometheus container can use || | `prometheus.resources.cpu.request` | Amount of CPU units that the prometheus container requests || | `prometheus.resources.memory.limit` | Maximum amount of memory that prometheus container can use || diff --git a/charts/linkerd2/values.yaml b/charts/linkerd2/values.yaml index e3890c3ba2a62..edfbb5fe53911 100644 --- a/charts/linkerd2/values.yaml +++ b/charts/linkerd2/values.yaml @@ -286,6 +286,8 @@ prometheus: # subPath: recording_rules.yml # configMap: linkerd-prometheus-rules # resources: + # proxy: + # resources: tracing: enabled: false diff --git a/test/install_test.go b/test/install_test.go index 780e69286c8d1..f6202db28fe7b 100644 --- a/test/install_test.go +++ b/test/install_test.go @@ -462,8 +462,8 @@ func TestUpgradeHelm(t *testing.T) { "--set", "grafana.proxy.resources.memory.request=103Mi", "--set", "identityProxyResources.cpu.limit=1040m", "--set", "identityProxyResources.memory.request=104Mi", - "--set", "prometheusProxyResources.cpu.limit=1050m", - "--set", "prometheusProxyResources.memory.request=105Mi", + "--set", "prometheus.proxy.resources.cpu.limit=1050m", + "--set", "prometheus.proxy.resources.memory.request=105Mi", "--set", "proxyInjectorProxyResources.cpu.limit=1060m", "--set", "proxyInjectorProxyResources.memory.request=106Mi", "--set", "smiMetricsProxyResources.cpu.limit=1070m", From 725dc2e91abccb6ac513d557d5c207e21db9fc34 Mon 
Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 29 Jun 2020 12:01:36 +0000 Subject: [PATCH 36/42] add instructions in prometheus/values.yaml Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/values.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/charts/add-ons/prometheus/values.yaml b/charts/add-ons/prometheus/values.yaml index d3d60d2f2b6ba..5a91f49381c8b 100644 --- a/charts/add-ons/prometheus/values.yaml +++ b/charts/add-ons/prometheus/values.yaml @@ -1,3 +1,8 @@ +# This add-on's default property values are declared in `charts/add-ons/prometheus/values.yaml`. +# If installing/upgrading with Helm, you can override them through the usual `--set` or `-f` flags +# when installing with the parent linkerd2 chart +# Do not override them in this file! +# If installing/upgrading with linkerd's CLI, use the `--addon-config` flag. image: prom/prometheus:v2.15.2 args: storage.tsdb.path: /data From a1650eef8cb9833fcac39cfefd26478ad2aa47fa Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Sat, 4 Jul 2020 19:49:38 +0530 Subject: [PATCH 37/42] update golden files Signed-off-by: Tarun Pothulapati --- ...install_controlplane_tracing_output.golden | 223 +--------------- .../testdata/install_custom_registry.golden | 209 +-------------- cli/cmd/testdata/install_default.golden | 209 +-------------- ...stall_default_override_dst_get_nets.golden | 209 +-------------- cli/cmd/testdata/install_ha_output.golden | 247 +---------------- .../install_ha_with_overrides_output.golden | 247 +---------------- .../install_heartbeat_disabled_output.golden | 209 +-------------- cli/cmd/testdata/install_helm_output.golden | 203 +------------- .../install_helm_output_addons.golden | 203 +------------- .../testdata/install_helm_output_ha.golden | 241 +---------------- .../testdata/install_no_init_container.golden | 176 +------------ cli/cmd/testdata/install_output.golden | 208 +-------------- .../install_prometheus_overwrite.golden | 20 +- cli/cmd/testdata/install_proxy_ignores.golden | 209 +-------------- .../install_restricted_dashboard.golden | 209 +-------------- cli/cmd/testdata/install_tracing.golden | 209 +-------------- .../testdata/install_tracing_overwrite.golden | 209 +-------------- .../testdata/upgrade_add-on_overwrite.golden | 211 +-------------- cli/cmd/testdata/upgrade_add_add-on.golden | 211 +-------------- cli/cmd/testdata/upgrade_default.golden | 211 +-------------- .../testdata/upgrade_external_issuer.golden | 211 +-------------- .../upgrade_grafana_addon_overwrite.yaml | 211 +-------------- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 211 +-------------- .../testdata/upgrade_grafana_overwrite.yaml | 211 +-------------- cli/cmd/testdata/upgrade_ha.golden | 249 +----------------- .../upgrade_keep_webhook_cabundle.golden | 211 +-------------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 211 +-------------- .../testdata/upgrade_overwrite_issuer.golden | 209 +-------------- ...write_trust_anchors-external-issuer.golden | 209 +-------------- .../upgrade_overwrite_trust_anchors.golden | 209 +-------------- .../upgrade_two_level_webhook_cert.golden | 211 +-------------- 31 files changed, 79 insertions(+), 6347 deletions(-) diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 8fae73d2850ec..eef50559a978c 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -1822,227 +1822,6 @@ spec: --- apiVersion: 
apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH - value: /var/run/linkerd/podinfo/labels - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_ADDR - value: linkerd-collector.linkerd.svc.cluster.local:55678 - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_NAME - value: linkerd-collector.linkerd.serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: var/run/linkerd/podinfo - name: podinfo - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - 
runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.labels - path: "labels" - name: podinfo - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3376,6 +3155,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 163aa79c0486f..899a9b4d5ab22 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: my.custom.registry/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 
-kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 35ef77b785ddf..220ca7d91dbb7 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index e7b8562c25534..7dfc24d020877 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.0.0.0/8" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index f2ed51b4fdc7f..c5e7938a1f642 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -1890,251 +1890,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: 
LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} 
- - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3553,6 +3308,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 7a11638a763e7..d1a58c0424bc6 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -1890,251 +1890,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 2 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: 
"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "400m" - memory: "300Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" 
- securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3553,6 +3308,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index ba9f28cf05ceb..278776d760b7e 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -1673,213 +1673,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- 
-### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3156,6 +2949,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index c546be9790197..4478a8b9e344b 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -1826,207 +1826,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3279,6 +3078,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_helm_output_addons.golden 
b/cli/cmd/testdata/install_helm_output_addons.golden index e049a55863bad..ae76c066f5c22 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -1827,207 +1827,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3286,6 +3085,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 
d6ababce25f6f..99ba025dbef5e 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -1954,245 +1954,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3587,6 +3348,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git 
a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index e217d4237eddf..86866018cd2c2 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -1627,180 +1627,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -2945,6 +2771,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 0471b889cbed6..7216ff50d6641 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -1758,212 +1758,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: 
- annotations: - CreatedByAnnotation: CliVersion - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector - namespace: Namespace -spec: - replicas: 1 - selector: - matchLabels: - ControllerComponentLabel: proxy-injector - template: - metadata: - annotations: - CreatedByAnnotation: CliVersion - linkerd.io/identity-mode: default - linkerd.io/proxy-version: ProxyVersion - labels: - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.Namespace.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "DestinationGetNetworks" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.Namespace.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: Namespace - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: ProxyImageName:ProxyVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: ProxyInitImageName:ProxyInitVersion - imagePullPolicy: ImagePullPolicy - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: 
CreatedByAnnotation: CliVersion @@ -3237,6 +3031,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.Namespace.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index a4c90c245f7a9..731eac1c69295 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -802,7 +802,7 @@ data: global: | {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"-----BEGIN CERTIFICATE-----\nMIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy\nLmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE\nAxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0\nxtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364\n6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF\nBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE\nAiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv\nOLO4Zsk1XrGZHGsmyiEyvYF9lpY=\n-----END CERTIFICATE-----\n","issuanceLifetime":"86400s","clockSkewAllowance":"20s","scheme":"linkerd.io/tls"},"autoInjectContext":null,"omitWebhookSideEffects":false,"clusterDomain":"cluster.local"} proxy: | - {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version","destinationGetNetworks":"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"} + {"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxyInitImageVersion":"v1.3.3","debugImage":{"imageName":"gcr.io/linkerd-io/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version","destinationGetNetworks":"10.0.0.0/8,172.16.0.0/12,192.168.0.0/16","logFormat":"plain"} install: | {"cliVersion":"dev-undefined","flags":[]} --- @@ -910,6 +910,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -1132,6 +1134,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - 
name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -1351,6 +1355,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: localhost.:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -1619,6 +1625,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -1817,6 +1825,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -2054,6 +2064,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -2279,6 +2291,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -2638,6 +2652,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS @@ -3089,6 +3105,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index a351f73c75de5..09dacc2ad5a86 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector 
- ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false 
- readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,22,8100-8102 - - --outbound-ports-to-ignore - - 443,5432 - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 12b81f7ebdfa8..8a7f83c79e272 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -1694,213 +1694,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT 
- value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - 
resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3177,6 +2970,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 2cf2014cfad43..5d94983b9fc8a 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -1763,213 +1763,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- 
-### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3252,6 +3045,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 10d19c80fc3d9..5c2d36976cb4e 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -1763,213 +1763,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3252,6 +3045,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index 85c228cf4757e..ae1088b573b38 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -1771,215 +1771,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3272,6 +3063,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index ea34611c755f1..d36fb5ce2e36b 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -1771,215 +1771,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3270,6 +3061,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index b789ad3ba20ac..74140a14b8701 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index d2c107a3ee8ef..802537c1f7fa7 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -1756,215 +1756,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3249,6 +3040,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 5a77ffe50e9dd..a7973c898188f 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 5a77ffe50e9dd..a7973c898188f 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index c7fde7ae5fe11..cd85d1d52e0c3 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 2d9064ed2c05b..859d31b11f1af 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -1898,253 +1898,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: linkerd.io/control-plane-component - operator: In - values: - - proxy-injector - topologyKey: kubernetes.io/hostname - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - 
value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN 
- - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3571,6 +3324,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index de4fbd01b5379..f8ce6c655e815 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 5a77ffe50e9dd..a7973c898188f 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 3a27b97dc69bb..a93e49ab9326d 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 2b629faa6174f..f49585b409dce 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -1748,213 +1748,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3231,6 +3024,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 3a27b97dc69bb..a93e49ab9326d 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -1762,213 +1762,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: 
annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3245,6 +3038,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 53d011b4be992..38734401dafbd 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -1770,215 +1770,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - 
emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3263,6 +3054,8 @@ spec: - env: - name: LINKERD2_PROXY_LOG value: warn,linkerd=info + - name: LINKERD2_PROXY_LOG_FORMAT + value: plain - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR value: linkerd-dst.linkerd.svc.cluster.local:8086 - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS From 97ecefad7ebb206ccac14d0007a7f283dd82669f Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Mon, 6 Jul 2020 23:28:57 +0530 Subject: [PATCH 38/42] make prometheus urls optional and based on add-on Signed-off-by: Tarun Pothulapati --- charts/linkerd2/templates/controller.yaml | 4 +++- charts/linkerd2/templates/heartbeat.yaml | 4 +++- charts/linkerd2/templates/psp.yaml | 2 ++ cli/cmd/testdata/install_addon_control-plane.golden | 4 ++-- cli/cmd/testdata/install_control-plane.golden | 4 ++-- cli/cmd/testdata/install_controlplane_tracing_output.golden | 4 ++-- cli/cmd/testdata/install_custom_registry.golden | 4 ++-- cli/cmd/testdata/install_default.golden | 4 ++-- cli/cmd/testdata/install_default_override_dst_get_nets.golden | 4 ++-- cli/cmd/testdata/install_grafana_existing.golden | 4 ++-- cli/cmd/testdata/install_ha_output.golden | 4 ++-- cli/cmd/testdata/install_ha_with_overrides_output.golden | 4 ++-- cli/cmd/testdata/install_heartbeat_disabled_output.golden | 2 +- cli/cmd/testdata/install_helm_output.golden | 4 ++-- cli/cmd/testdata/install_helm_output_addons.golden | 4 ++-- cli/cmd/testdata/install_helm_output_ha.golden | 4 ++-- cli/cmd/testdata/install_no_init_container.golden | 4 ++-- cli/cmd/testdata/install_output.golden | 4 ++-- cli/cmd/testdata/install_prometheus_overwrite.golden | 4 ++-- cli/cmd/testdata/install_proxy_ignores.golden | 4 ++-- cli/cmd/testdata/install_restricted_dashboard.golden | 4 ++-- cli/cmd/testdata/install_tracing.golden | 4 ++-- cli/cmd/testdata/install_tracing_overwrite.golden | 4 ++-- cli/cmd/testdata/upgrade_add-on_controlplane.golden | 4 ++-- cli/cmd/testdata/upgrade_add-on_overwrite.golden | 4 ++-- cli/cmd/testdata/upgrade_add_add-on.golden | 4 ++-- cli/cmd/testdata/upgrade_default.golden | 4 ++-- cli/cmd/testdata/upgrade_external_issuer.golden | 4 ++-- cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml | 4 ++-- cli/cmd/testdata/upgrade_grafana_disabled.yaml | 4 ++-- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 4 ++-- cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml | 4 ++-- cli/cmd/testdata/upgrade_grafana_overwrite.yaml | 4 ++-- cli/cmd/testdata/upgrade_ha.golden | 4 ++-- cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden | 4 ++-- cli/cmd/testdata/upgrade_nothing_addon.yaml | 4 ++-- cli/cmd/testdata/upgrade_overwrite_issuer.golden | 4 ++-- .../upgrade_overwrite_trust_anchors-external-issuer.golden | 4 ++-- cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden | 4 ++-- cli/cmd/testdata/upgrade_two_level_webhook_cert.golden | 4 ++-- 40 files changed, 81 insertions(+), 75 deletions(-) diff --git a/charts/linkerd2/templates/controller.yaml b/charts/linkerd2/templates/controller.yaml index 5043646fc9184..defc64c1a09d2 100644 --- a/charts/linkerd2/templates/controller.yaml +++ b/charts/linkerd2/templates/controller.yaml @@ -69,10 +69,12 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090 - 
-destination-addr=linkerd-dst.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:8086 - -controller-namespace={{.Values.global.namespace}} - -log-level={{.Values.global.controllerLogLevel}} + {{- if .Values.prometheus.enabled }} + - -prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090 + {{- end}} {{- include "partials.linkerd.trace" . | nindent 8 -}} image: {{.Values.controllerImage}}:{{default .Values.global.linkerdVersion .Values.global.controllerImageVersion}} imagePullPolicy: {{.Values.global.imagePullPolicy}} diff --git a/charts/linkerd2/templates/heartbeat.yaml b/charts/linkerd2/templates/heartbeat.yaml index 3e513adfbc1fd..9695ca8e4b384 100644 --- a/charts/linkerd2/templates/heartbeat.yaml +++ b/charts/linkerd2/templates/heartbeat.yaml @@ -39,9 +39,11 @@ spec: imagePullPolicy: {{.Values.global.imagePullPolicy}} args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090" - "-controller-namespace={{.Values.global.namespace}}" - "-log-level={{.Values.global.controllerLogLevel}}" + {{- if .Values.prometheus.enabled }} + - "-prometheus-url=http://linkerd-prometheus.{{.Values.global.namespace}}.svc.{{.Values.global.clusterDomain}}:9090" + {{- end}} {{- if .Values.heartbeatResources -}} {{- include "partials.resources" .Values.heartbeatResources | nindent 12 }} {{- end }} diff --git a/charts/linkerd2/templates/psp.yaml b/charts/linkerd2/templates/psp.yaml index a64fc11a1b2cc..31162dc0f09b3 100644 --- a/charts/linkerd2/templates/psp.yaml +++ b/charts/linkerd2/templates/psp.yaml @@ -103,9 +103,11 @@ subjects: - kind: ServiceAccount name: linkerd-identity namespace: {{.Values.global.namespace}} +{{ if .Values.prometheus.enabled -}} - kind: ServiceAccount name: linkerd-prometheus namespace: {{.Values.global.namespace}} +{{ end -}} - kind: ServiceAccount name: linkerd-proxy-injector namespace: {{.Values.global.namespace}} diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index 0af6f19a009cd..0362912820e5d 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -315,10 +315,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -737,9 +737,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index a58abad3c8cd9..02499ebc00854 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -315,10 +315,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - 
-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -737,9 +737,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index eef50559a978c..d55c23ce3f17e 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -1119,10 +1119,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -trace-collector=linkerd-collector.linkerd.svc.cluster.local:55678 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent @@ -1571,9 +1571,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index 899a9b4d5ab22..5745434c0f8e3 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: my.custom.registry/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index 220ca7d91dbb7..9727efdea7cfa 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - 
"-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 7dfc24d020877..5ef492dfb9c8f 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index 621f8b3a9f719..d48a688a7582e 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -1101,10 +1101,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1523,9 +1523,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index c5e7938a1f642..be028650f9775 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -1163,10 +1163,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1634,9 +1634,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index d1a58c0424bc6..559a2a5156b52 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ 
-1163,10 +1163,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1634,9 +1634,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index 278776d760b7e..4864ce63fb0f8 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -1060,10 +1060,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 4478a8b9e344b..64c00e194925f 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -1187,10 +1187,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1595,9 +1595,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index ae76c066f5c22..9f44d896c8cb4 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -1187,10 +1187,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1595,9 +1595,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git 
a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 99ba025dbef5e..cdc97b59ddd1d 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -1246,10 +1246,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:linkerd-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1703,9 +1703,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" diff --git a/cli/cmd/testdata/install_no_init_container.golden b/cli/cmd/testdata/install_no_init_container.golden index 86866018cd2c2..cc65450de5600 100644 --- a/cli/cmd/testdata/install_no_init_container.golden +++ b/cli/cmd/testdata/install_no_init_container.golden @@ -1068,10 +1068,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1424,9 +1424,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index 7216ff50d6641..1183b1c8ff2cf 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -1103,10 +1103,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 - -destination-addr=linkerd-dst.Namespace.svc.cluster.local:8086 - -controller-namespace=Namespace - -log-level=ControllerLogLevel + - -prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090 image: ControllerImage:ControllerImageVersion imagePullPolicy: ImagePullPolicy livenessProbe: @@ -1523,9 +1523,9 @@ spec: imagePullPolicy: ImagePullPolicy args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" - "-controller-namespace=Namespace" - "-log-level=ControllerLogLevel" + - "-prometheus-url=http://linkerd-prometheus.Namespace.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 731eac1c69295..6af778df53dcd 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - 
-controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 09dacc2ad5a86..f047722a4b5dd 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 8a7f83c79e272..d07f04a3b1cd5 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -1036,10 +1036,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1458,9 +1458,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index 5d94983b9fc8a..0958a52c8e8e4 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - 
"-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index 5c2d36976cb4e..dedd88d4661f9 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:install-control-plane-version imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 401e7f75bd050..a1a69a7e92180 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -317,10 +317,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -743,9 +743,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index ae1088b573b38..9edd542d6f9d9 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index d36fb5ce2e36b..d7bd4a665d04a 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - 
-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 74140a14b8701..9f261e5b1ff95 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index 802537c1f7fa7..cfda0b5d00671 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -1092,10 +1092,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1518,9 +1518,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index a7973c898188f..888cace0980b7 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - 
"-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index b0aeba548b234..da99c171ad237 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -1103,10 +1103,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1529,9 +1529,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index a7973c898188f..888cace0980b7 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index b0aeba548b234..da99c171ad237 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -1103,10 +1103,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1529,9 +1529,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index cd85d1d52e0c3..f2326d8fe5e99 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml 
+++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index 859d31b11f1af..441f0a71a2db3 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -1165,10 +1165,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1640,9 +1640,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" resources: limits: cpu: "1" diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index f8ce6c655e815..537b6217f927b 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index a7973c898188f..888cace0980b7 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ 
spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index a93e49ab9326d..b65d252fd2ba3 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index f49585b409dce..25046b0a2d867 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -1090,10 +1090,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1512,9 +1512,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index a93e49ab9326d..b65d252fd2ba3 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -1104,10 +1104,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1526,9 +1526,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- diff --git 
a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden index 38734401dafbd..d717ef7c12973 100644 --- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden +++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden @@ -1106,10 +1106,10 @@ spec: containers: - args: - public-api - - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 - -destination-addr=linkerd-dst.linkerd.svc.cluster.local:8086 - -controller-namespace=linkerd - -log-level=info + - -prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090 image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION imagePullPolicy: IfNotPresent livenessProbe: @@ -1532,9 +1532,9 @@ spec: imagePullPolicy: IfNotPresent args: - "heartbeat" - - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" - "-controller-namespace=linkerd" - "-log-level=info" + - "-prometheus-url=http://linkerd-prometheus.linkerd.svc.cluster.local:9090" securityContext: runAsUser: 2103 --- From 05057cdafca3fab96f65ad7b5d78bee26e1db134 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Tue, 7 Jul 2020 16:03:41 +0530 Subject: [PATCH 39/42] update golden files Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_prometheus_overwrite.golden | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 6af778df53dcd..3e561f421acb5 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -141,6 +141,9 @@ rules: - apiGroups: ["split.smi-spec.io"] resources: ["trafficsplits"] verbs: ["list", "get", "watch"] +- apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["list", "get", "watch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 From 2ab64720e1b8896883e1743f052917b4583bb236 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 8 Jul 2020 21:43:14 +0530 Subject: [PATCH 40/42] update golden files now that add-on level values files include more defaults Signed-off-by: Tarun Pothulapati --- cli/cmd/testdata/install_helm_output.golden | 17 +++++++++++++++++ .../testdata/install_helm_output_addons.golden | 17 +++++++++++++++++ cli/cmd/testdata/install_helm_output_ha.golden | 17 +++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index 252f8fae84592..adcc410a4e13a 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -2500,7 +2500,17 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 tracing: enabled: false --- @@ -2865,6 +2875,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3049,6 +3062,10 @@ spec: fsGroup: 65534 containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git 
a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index 78b8d9c94ff13..a9f0fbc5fc964 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -2501,7 +2501,17 @@ data: name: gcr.io/linkerd-io/grafana name: linkerd-grafana prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 tracing: collector: image: omnition/opencensus-collector:0.1.11 @@ -2872,6 +2882,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3056,6 +3069,10 @@ spec: fsGroup: 65534 containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index be78aec330a63..f08ac9392b43a 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -2743,7 +2743,17 @@ data: limit: 1024Mi request: 50Mi prometheus: + args: + config.file: /etc/prometheus/prometheus.yml + log.level: info + storage.tsdb.path: /data + storage.tsdb.retention.time: 6h enabled: true + globalConfig: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + image: prom/prometheus:v2.15.2 resources: cpu: limit: "4" @@ -3128,6 +3138,9 @@ metadata: data: prometheus.yml: |- global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s rule_files: - /etc/prometheus/*_rules.yml @@ -3312,6 +3325,10 @@ spec: fsGroup: 65534 containers: - args: + - --config.file=/etc/prometheus/prometheus.yml + - --log.level=info + - --storage.tsdb.path=/data + - --storage.tsdb.retention.time=6h image: prom/prometheus:v2.15.2 imagePullPolicy: IfNotPresent livenessProbe: From 376934134ff32c69b1676508758592825350f2c0 Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Wed, 8 Jul 2020 21:51:07 +0530 Subject: [PATCH 41/42] update prom template to not have default value Signed-off-by: Tarun Pothulapati --- charts/add-ons/prometheus/templates/prometheus.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/add-ons/prometheus/templates/prometheus.yaml b/charts/add-ons/prometheus/templates/prometheus.yaml index c323cd8139e35..6eafd92bbb55a 100644 --- a/charts/add-ons/prometheus/templates/prometheus.yaml +++ b/charts/add-ons/prometheus/templates/prometheus.yaml @@ -235,7 +235,7 @@ spec: {{- range $key, $value := .Values.args}} - --{{ $key }}{{ if $value }}={{ $value }}{{ end }} {{- end }} - image: {{ default "prom/prometheus:v2.15.2" .Values.image }} + image: {{.Values.image}} imagePullPolicy: {{.Values.global.imagePullPolicy}} livenessProbe: httpGet: From af6399867cf14d8372dd0e027d9836f91ca9b47b Mon Sep 17 00:00:00 2001 From: Tarun Pothulapati Date: Thu, 9 Jul 2020 23:01:07 +0530 Subject: [PATCH 42/42] update golden files Signed-off-by: Tarun Pothulapati --- .../install_addon_control-plane.golden | 239 ++-------------- cli/cmd/testdata/install_control-plane.golden | 206 +------------- ...install_controlplane_tracing_output.golden | 233 +--------------- 
.../testdata/install_custom_registry.golden | 219 +-------------- cli/cmd/testdata/install_default.golden | 219 +-------------- ...stall_default_override_dst_get_nets.golden | 219 +-------------- .../testdata/install_grafana_existing.golden | 27 +- cli/cmd/testdata/install_ha_output.golden | 237 +--------------- .../install_ha_with_overrides_output.golden | 237 +--------------- .../install_heartbeat_disabled_output.golden | 217 +-------------- cli/cmd/testdata/install_helm_output.golden | 212 +------------- .../install_helm_output_addons.golden | 250 ++--------------- .../testdata/install_helm_output_ha.golden | 231 +--------------- cli/cmd/testdata/install_output.golden | 218 +-------------- .../install_prometheus_overwrite.golden | 45 +++ cli/cmd/testdata/install_proxy_ignores.golden | 219 +-------------- .../install_restricted_dashboard.golden | 219 +-------------- cli/cmd/testdata/install_tracing.golden | 257 ++--------------- .../testdata/install_tracing_overwrite.golden | 257 ++--------------- .../upgrade_add-on_controlplane.golden | 241 ++-------------- .../testdata/upgrade_add-on_overwrite.golden | 259 ++---------------- cli/cmd/testdata/upgrade_add_add-on.golden | 259 ++---------------- cli/cmd/testdata/upgrade_default.golden | 221 +-------------- .../testdata/upgrade_external_issuer.golden | 221 +-------------- .../upgrade_grafana_addon_overwrite.yaml | 221 +-------------- .../testdata/upgrade_grafana_disabled.yaml | 27 +- cli/cmd/testdata/upgrade_grafana_enabled.yaml | 221 +-------------- .../upgrade_grafana_enabled_disabled.yaml | 27 +- .../testdata/upgrade_grafana_overwrite.yaml | 221 +-------------- cli/cmd/testdata/upgrade_ha.golden | 239 +--------------- .../upgrade_keep_webhook_cabundle.golden | 221 +-------------- cli/cmd/testdata/upgrade_nothing_addon.yaml | 221 +-------------- .../testdata/upgrade_overwrite_issuer.golden | 219 +-------------- ...write_trust_anchors-external-issuer.golden | 219 +-------------- .../upgrade_overwrite_trust_anchors.golden | 219 +-------------- .../upgrade_two_level_webhook_cert.golden | 221 +-------------- 36 files changed, 364 insertions(+), 7074 deletions(-) diff --git a/cli/cmd/testdata/install_addon_control-plane.golden b/cli/cmd/testdata/install_addon_control-plane.golden index a7ef25a97f668..b9d924389b492 100644 --- a/cli/cmd/testdata/install_addon_control-plane.golden +++ b/cli/cmd/testdata/install_addon_control-plane.golden @@ -1147,207 +1147,6 @@ spec: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 
-metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - args: - --incoming-proxy-port - "4143" @@ -2180,6 +1979,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-grafana volumes: - emptyDir: {} @@ -2194,6 +1996,8 @@ spec: path: provisioning/dashboards/dashboards.yaml name: linkerd-grafana-config name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -2565,16 +2369,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: - name: data emptyDir: {} - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: 
linkerd-proxy-init-xtables-lock - emptyDir: @@ -2825,9 +2626,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3032,20 +2838,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_control-plane.golden b/cli/cmd/testdata/install_control-plane.golden index ecb32df6b0e1e..9d1a58d1be4f6 100644 --- a/cli/cmd/testdata/install_control-plane.golden +++ b/cli/cmd/testdata/install_control-plane.golden @@ -1146,207 +1146,6 @@ spec: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - 
initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: 
FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - args: - --incoming-proxy-port - "4143" @@ -2560,6 +2359,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -2567,6 +2369,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_controlplane_tracing_output.golden b/cli/cmd/testdata/install_controlplane_tracing_output.golden index 86d1945fb6b0c..71f5624ac4152 100644 --- a/cli/cmd/testdata/install_controlplane_tracing_output.golden +++ b/cli/cmd/testdata/install_controlplane_tracing_output.golden @@ -1845,234 +1845,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH - value: /var/run/linkerd/podinfo/labels - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_ADDR - value: linkerd-collector.linkerd.svc.cluster.local:55678 - - name: LINKERD2_PROXY_TRACE_COLLECTOR_SVC_NAME - value: linkerd-collector.linkerd.serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: var/run/linkerd/podinfo - name: podinfo - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - 
runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.labels - path: "labels" - name: podinfo - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3558,6 +3330,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3571,6 +3346,8 @@ spec: fieldPath: metadata.labels path: "labels" name: podinfo + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_custom_registry.golden b/cli/cmd/testdata/install_custom_registry.golden index af5921611ed80..c30bd495089a8 100644 --- a/cli/cmd/testdata/install_custom_registry.golden +++ b/cli/cmd/testdata/install_custom_registry.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: my.custom.registry/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 
0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: my.custom.registry/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: my.custom.registry/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: 
FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3419,6 +3205,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3426,6 +3215,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_default.golden b/cli/cmd/testdata/install_default.golden index e3ba4c4861bda..ccc43399d846f 100644 --- a/cli/cmd/testdata/install_default.golden +++ b/cli/cmd/testdata/install_default.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - 
value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config 
- name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3419,6 +3205,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3426,6 +3215,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_default_override_dst_get_nets.golden b/cli/cmd/testdata/install_default_override_dst_get_nets.golden index 2005329c2ce75..c0b4b05636fc9 100644 --- a/cli/cmd/testdata/install_default_override_dst_get_nets.golden +++ b/cli/cmd/testdata/install_default_override_dst_get_nets.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.0.0.0/8" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3419,6 +3205,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3426,6 +3215,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_grafana_existing.golden b/cli/cmd/testdata/install_grafana_existing.golden index c9f9f023899d6..e670004a528c4 100644 --- a/cli/cmd/testdata/install_grafana_existing.golden +++ b/cli/cmd/testdata/install_grafana_existing.golden @@ -1970,11 +1970,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2211,11 +2214,11 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2443,11 +2446,11 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2890,13 +2893,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: - name: data emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_ha_output.golden b/cli/cmd/testdata/install_ha_output.golden index d0eafaabc003e..47dbde63add54 100644 --- a/cli/cmd/testdata/install_ha_output.golden +++ b/cli/cmd/testdata/install_ha_output.golden @@ -1913,238 +1913,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - 
linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config 
- - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3713,6 +3481,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3720,6 +3491,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_ha_with_overrides_output.golden b/cli/cmd/testdata/install_ha_with_overrides_output.golden index 268e0a83be98e..5e776a4aa27d3 100644 --- a/cli/cmd/testdata/install_ha_with_overrides_output.golden +++ b/cli/cmd/testdata/install_ha_with_overrides_output.golden @@ -1913,238 +1913,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 2 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: 
LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "400m" - memory: "300Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - 
serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3713,6 +3481,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3720,6 +3491,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_heartbeat_disabled_output.golden b/cli/cmd/testdata/install_heartbeat_disabled_output.golden index bcefaec28dae5..52b8d33dd080d 100644 --- a/cli/cmd/testdata/install_heartbeat_disabled_output.golden +++ b/cli/cmd/testdata/install_heartbeat_disabled_output.golden @@ -1696,218 +1696,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config 
- - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3328,6 +3116,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3335,6 +3126,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output.golden b/cli/cmd/testdata/install_helm_output.golden index cff25e0dbdccd..0dd74f0ec393d 100644 --- a/cli/cmd/testdata/install_helm_output.golden +++ b/cli/cmd/testdata/install_helm_output.golden @@ -1849,213 +1849,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3460,6 +3253,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3467,6 
+3263,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_helm_output_addons.golden b/cli/cmd/testdata/install_helm_output_addons.golden index df08580fa7b20..2aa4555e5c5d6 100644 --- a/cli/cmd/testdata/install_helm_output_addons.golden +++ b/cli/cmd/testdata/install_helm_output_addons.golden @@ -1850,213 +1850,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:linkerd-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3047,16 +2840,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: items: - - key: 
linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3463,6 +3260,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3470,6 +3270,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3739,9 +3541,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3937,20 +3744,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_helm_output_ha.golden b/cli/cmd/testdata/install_helm_output_ha.golden index 0e52c837327b2..5b6c31b715fba 100644 --- a/cli/cmd/testdata/install_helm_output_ha.golden +++ b/cli/cmd/testdata/install_helm_output_ha.golden @@ -1977,232 +1977,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/helm linkerd-version - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: linkerd-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/helm-release-version: "0" - linkerd.io/created-by: linkerd/helm linkerd-version - linkerd.io/identity-mode: default - linkerd.io/proxy-version: test-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - 
initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - test-trust-anchor - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: test.trust.domain - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:test-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - 
--proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,222 - - --outbound-ports-to-ignore - - 443,111 - image: gcr.io/linkerd-io/proxy-init:test-proxy-init-version - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -# Source: linkerd2/templates/proxy-injector.yaml ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/helm linkerd-version @@ -3755,6 +3529,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3762,6 +3539,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_output.golden b/cli/cmd/testdata/install_output.golden index eca5240b5694a..9aea2f39040fd 100644 --- a/cli/cmd/testdata/install_output.golden +++ b/cli/cmd/testdata/install_output.golden @@ -1781,219 +1781,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - CreatedByAnnotation: CliVersion - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: ControllerImageVersion - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - name: linkerd-proxy-injector - namespace: Namespace -spec: - replicas: 1 - selector: - matchLabels: - ControllerComponentLabel: proxy-injector - template: - metadata: - annotations: - CreatedByAnnotation: CliVersion - linkerd.io/identity-mode: default - linkerd.io/proxy-version: ProxyVersion - labels: - ControllerComponentLabel: proxy-injector - ControllerNamespaceLabel: Namespace - WorkloadNamespaceLabel: Namespace - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=ControllerLogLevel - image: ControllerImage:ControllerImageVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - 
name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.Namespace.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "DestinationGetNetworks" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.Namespace.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: Namespace - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: ProxyImageName:ProxyVersion - imagePullPolicy: ImagePullPolicy - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: ProxyInitImageName:ProxyInitVersion - imagePullPolicy: ImagePullPolicy - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - 
requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: CreatedByAnnotation: CliVersion @@ -3410,6 +3197,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3417,6 +3207,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_prometheus_overwrite.golden b/cli/cmd/testdata/install_prometheus_overwrite.golden index 3e561f421acb5..1bea2dc660776 100644 --- a/cli/cmd/testdata/install_prometheus_overwrite.golden +++ b/cli/cmd/testdata/install_prometheus_overwrite.golden @@ -1035,6 +1035,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-identity volumes: - configMap: @@ -1043,6 +1046,8 @@ spec: - name: identity-issuer secret: secretName: linkerd-identity-issuer + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -1259,11 +1264,16 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-controller volumes: - configMap: name: linkerd-config name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -1480,11 +1490,16 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-destination volumes: - configMap: name: linkerd-config name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -1750,11 +1765,16 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-web volumes: - configMap: name: linkerd-config name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -1950,6 +1970,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-proxy-injector volumes: - configMap: @@ -1958,6 +1981,8 @@ spec: - name: tls secret: secretName: linkerd-proxy-injector-tls + - emptyDir: {} + name: 
linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -2189,11 +2214,16 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-sp-validator volumes: - name: tls secret: secretName: linkerd-sp-validator-tls + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -2416,11 +2446,16 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-tap volumes: - configMap: name: linkerd-config name: config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -2777,6 +2812,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-grafana volumes: - emptyDir: {} @@ -2791,6 +2829,8 @@ spec: path: provisioning/dashboards/dashboards.yaml name: linkerd-grafana-config name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3232,6 +3272,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: alerting-rules @@ -3245,6 +3288,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_proxy_ignores.golden b/cli/cmd/testdata/install_proxy_ignores.golden index 7f29a7aed9d30..9630c809fb31a 100644 --- a/cli/cmd/testdata/install_proxy_ignores.golden +++ b/cli/cmd/testdata/install_proxy_ignores.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - 
timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: 
/var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,22,8100-8102 - - --outbound-ports-to-ignore - - 443,5432 - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3419,6 +3205,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3426,6 +3215,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_restricted_dashboard.golden b/cli/cmd/testdata/install_restricted_dashboard.golden index 12a0b24d19dfd..6bb6875b94aae 100644 --- a/cli/cmd/testdata/install_restricted_dashboard.golden +++ b/cli/cmd/testdata/install_restricted_dashboard.golden @@ -1717,220 +1717,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: 
/data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - 
--outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3351,6 +3137,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3358,6 +3147,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/install_tracing.golden b/cli/cmd/testdata/install_tracing.golden index f69e29b1f7ead..f37bd33c2b4fe 100644 --- a/cli/cmd/testdata/install_tracing.golden +++ b/cli/cmd/testdata/install_tracing.golden @@ -1786,220 +1786,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: 
LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - 
name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3001,16 +2787,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3422,6 +3212,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3429,6 +3222,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3703,9 +3498,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3910,20 +3710,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/install_tracing_overwrite.golden b/cli/cmd/testdata/install_tracing_overwrite.golden index cc3d6e5510133..e8117eae4c9ec 100644 --- a/cli/cmd/testdata/install_tracing_overwrite.golden +++ b/cli/cmd/testdata/install_tracing_overwrite.golden @@ -1786,220 +1786,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - 
app.kubernetes.io/version: install-control-plane-version - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: install-proxy-version - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:install-control-plane-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:install-proxy-version - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3001,16 +2787,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3422,6 +3212,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3429,6 +3222,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3701,9 +3496,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3908,20 +3708,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_add-on_controlplane.golden b/cli/cmd/testdata/upgrade_add-on_controlplane.golden index 81db1c07c8601..1f750fc78d177 100644 --- a/cli/cmd/testdata/upgrade_add-on_controlplane.golden +++ b/cli/cmd/testdata/upgrade_add-on_controlplane.golden @@ -1157,209 +1157,6 @@ spec: - mountPath: /var/run/linkerd/identity/end-entity name: linkerd-identity-end-entity initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -kind: Service -apiVersion: v1 -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /ready - port: 9995 - securityContext: - runAsUser: 2103 - volumeMounts: - - mountPath: /var/run/linkerd/config - name: config - - mountPath: /var/run/linkerd/tls - name: tls - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - args: - --incoming-proxy-port - "4143" @@ -2198,6 +1995,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-grafana volumes: - emptyDir: {} @@ -2212,6 +2012,8 @@ spec: path: provisioning/dashboards/dashboards.yaml name: linkerd-grafana-config name: grafana-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -2585,16 +2387,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-prometheus volumes: - name: data emptyDir: {} - configMap: - items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - 
name: linkerd-collector-config-val + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2847,9 +2646,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3056,20 +2860,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_add-on_overwrite.golden b/cli/cmd/testdata/upgrade_add-on_overwrite.golden index ff1d9eca27d59..14f6eb13f0d48 100644 --- a/cli/cmd/testdata/upgrade_add-on_overwrite.golden +++ b/cli/cmd/testdata/upgrade_add-on_overwrite.golden @@ -1794,222 +1794,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: 
LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: 
- add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3021,16 +2805,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: overwrite-collector + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: overwrite-collector-config - name: overwrite-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3444,6 +3232,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3451,6 +3242,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3725,9 +3518,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: overwrite-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: overwrite-collector-config + name: overwrite-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3934,20 +3732,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_add_add-on.golden b/cli/cmd/testdata/upgrade_add_add-on.golden index 5392671e05668..5924df7872173 100644 --- a/cli/cmd/testdata/upgrade_add_add-on.golden +++ b/cli/cmd/testdata/upgrade_add_add-on.golden @@ -1794,222 +1794,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - 
selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3019,16 +2803,20 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-collector + serviceAccountName: linkerd-grafana volumes: - emptyDir: {} name: data - configMap: items: - - key: linkerd-collector-config - path: linkerd-collector-config.yaml - name: linkerd-collector-config - name: linkerd-collector-config-val + - key: grafana.ini + path: grafana.ini + - key: datasources.yaml + path: provisioning/datasources/datasources.yaml + - key: dashboards.yaml + path: provisioning/dashboards/dashboards.yaml + name: linkerd-grafana-config + name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3442,6 +3230,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3449,6 +3240,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity @@ -3725,9 +3518,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - dnsPolicy: ClusterFirst - serviceAccountName: linkerd-jaeger + serviceAccountName: linkerd-collector volumes: + - configMap: + items: + - key: linkerd-collector-config + path: linkerd-collector-config.yaml + name: linkerd-collector-config + name: linkerd-collector-config-val - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -3934,20 +3732,9 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-grafana + dnsPolicy: ClusterFirst + serviceAccountName: linkerd-jaeger volumes: - - emptyDir: {} - name: data - - configMap: - items: - - key: grafana.ini - path: grafana.ini - - key: datasources.yaml - path: provisioning/datasources/datasources.yaml - - key: dashboards.yaml - path: provisioning/dashboards/dashboards.yaml - name: linkerd-grafana-config - name: grafana-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_default.golden b/cli/cmd/testdata/upgrade_default.golden index 486e19f26fcb0..8e534f9484f45 100644 --- a/cli/cmd/testdata/upgrade_default.golden +++ b/cli/cmd/testdata/upgrade_default.golden @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - 
linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: 
linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_external_issuer.golden b/cli/cmd/testdata/upgrade_external_issuer.golden index a39dd1cd2fde7..1e91baaed0d4e 100644 --- a/cli/cmd/testdata/upgrade_external_issuer.golden +++ b/cli/cmd/testdata/upgrade_external_issuer.golden @@ -1779,222 +1779,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - 
linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: 
linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3425,6 +3209,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3432,6 +3219,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml index 9e468d6d1bb96..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_addon_overwrite.yaml @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - 
linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: 
linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_disabled.yaml index e1d3dd81a52e4..915a6b4d2fb91 100644 --- a/cli/cmd/testdata/upgrade_grafana_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_disabled.yaml @@ -1979,11 +1979,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2222,11 +2225,11 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2456,11 +2459,11 @@ spec: 
volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2905,13 +2908,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: - name: data emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_grafana_enabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled.yaml index 9e468d6d1bb96..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled.yaml @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: 
linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml index e1d3dd81a52e4..915a6b4d2fb91 100644 --- a/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml +++ b/cli/cmd/testdata/upgrade_grafana_enabled_disabled.yaml @@ -1979,11 +1979,14 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus + serviceAccountName: linkerd-proxy-injector volumes: - configMap: - name: linkerd-prometheus-config - name: prometheus-config + name: linkerd-config + name: config + - name: tls + secret: + secretName: linkerd-proxy-injector-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2222,11 +2225,11 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-proxy-injector + serviceAccountName: linkerd-sp-validator volumes: - name: tls secret: - secretName: linkerd-proxy-injector-tls + secretName: linkerd-sp-validator-tls - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2456,11 +2459,11 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-sp-validator + serviceAccountName: linkerd-tap volumes: - - name: tls - secret: - secretName: linkerd-sp-validator-tls + - configMap: + name: linkerd-config + name: config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: @@ -2905,13 +2908,13 @@ spec: volumeMounts: - mountPath: /run name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-tap + serviceAccountName: linkerd-prometheus volumes: - name: data emptyDir: {} - configMap: - name: linkerd-config - name: config + name: linkerd-prometheus-config + name: prometheus-config - emptyDir: {} name: linkerd-proxy-init-xtables-lock - emptyDir: diff --git a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml index 686aa29f34e19..79fde0daebe41 100644 --- a/cli/cmd/testdata/upgrade_grafana_overwrite.yaml +++ b/cli/cmd/testdata/upgrade_grafana_overwrite.yaml @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default 
- linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: 
linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_ha.golden b/cli/cmd/testdata/upgrade_ha.golden index ffc994d346088..8db5a3ce11686 100644 --- a/cli/cmd/testdata/upgrade_ha.golden +++ b/cli/cmd/testdata/upgrade_ha.golden @@ -1921,240 +1921,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 3 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - strategy: - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - 
linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - securityContext: - fsGroup: 65534 - containers: - - args: - - --storage.tsdb.path=/data - - --storage.tsdb.retention.time=6h - - --config.file=/etc/prometheus/prometheus.yml - - --log.level=info - image: prom/prometheus:v2.15.2 - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /-/healthy - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - name: prometheus - ports: - - containerPort: 9090 - name: admin-http - readinessProbe: - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: - limits: - cpu: "4" - memory: "8192Mi" - requests: - cpu: "300m" - memory: "300Mi" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - limits: - cpu: "1" - memory: "250Mi" - requests: - cpu: "100m" - memory: "20Mi" - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: 
linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3733,6 +3499,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3740,6 +3509,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden index dd8581ab46a23..f9d8c2fe1be26 100644 --- a/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden +++ b/cli/cmd/testdata/upgrade_keep_webhook_cabundle.golden @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191,2525-2527,2529 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - 
serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_nothing_addon.yaml b/cli/cmd/testdata/upgrade_nothing_addon.yaml index 9e468d6d1bb96..5200f730f178a 100644 --- a/cli/cmd/testdata/upgrade_nothing_addon.yaml +++ b/cli/cmd/testdata/upgrade_nothing_addon.yaml @@ -1793,222 +1793,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw - JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2 - MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r - ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg - Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ - Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB - /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe - aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC - IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R - SxZLbJKt6SJIIY9dw5gzQpUQR2U= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: 
linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3439,6 +3223,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3446,6 +3233,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_issuer.golden b/cli/cmd/testdata/upgrade_overwrite_issuer.golden index 0558d2a9ad9cd..0bbe6270b4efe 100644 --- a/cli/cmd/testdata/upgrade_overwrite_issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_issuer.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config 
- - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3419,6 +3205,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3426,6 +3215,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden index 8d08b15a37625..2a39cad3afb1f 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors-external-issuer.golden @@ -1771,220 +1771,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
- - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE - value: 10000ms - - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE - value: 10000ms - - name: _pod_ns - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LINKERD2_PROXY_DESTINATION_CONTEXT - value: ns:$(_pod_ns) - - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY - value: "10000" - - name: LINKERD2_PROXY_IDENTITY_DIR - value: /var/run/linkerd/identity/end-entity - - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS - value: | - -----BEGIN CERTIFICATE----- - MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy - LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE - AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0 - xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364 - 6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF - BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE - AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv - OLO4Zsk1XrGZHGsmyiEyvYF9lpY= - -----END CERTIFICATE----- - - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE - value: /var/run/secrets/kubernetes.io/serviceaccount/token - - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR - value: linkerd-identity.linkerd.svc.cluster.local:8080 - - name: _pod_sa - valueFrom: - fieldRef: - fieldPath: spec.serviceAccountName - - name: _l5d_ns - value: linkerd - - name: _l5d_trustdomain - value: cluster.local - - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME - value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_IDENTITY_SVC_NAME - value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_DESTINATION_SVC_NAME - value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - - name: LINKERD2_PROXY_TAP_SVC_NAME - value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain) - image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /live - port: 4191 - initialDelaySeconds: 10 - name: linkerd-proxy - ports: - - containerPort: 4143 - name: linkerd-proxy - - containerPort: 4191 - name: linkerd-admin - readinessProbe: - httpGet: - path: /ready - port: 4191 - initialDelaySeconds: 2 - resources: - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 2102 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /var/run/linkerd/identity/end-entity - name: linkerd-identity-end-entity - initContainers: - - args: - - --incoming-proxy-port - - "4143" - - --outgoing-proxy-port - - "4140" - - --proxy-uid - - "2102" - - --inbound-ports-to-ignore - - 4190,4191 - - --outbound-ports-to-ignore - - "443" - image: gcr.io/linkerd-io/proxy-init:v1.3.3 - imagePullPolicy: IfNotPresent - name: linkerd-init - resources: - limits: - cpu: "100m" - memory: "50Mi" - requests: - cpu: "10m" - memory: "10Mi" - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: false - runAsUser: 0 - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - mountPath: /run - name: linkerd-proxy-init-xtables-lock - serviceAccountName: linkerd-prometheus - volumes: - - name: data - emptyDir: {} - - configMap: - name: linkerd-prometheus-config - name: prometheus-config - - emptyDir: {} - name: linkerd-proxy-init-xtables-lock - - emptyDir: - medium: 
Memory - name: linkerd-identity-end-entity ---- -### -### Proxy Injector -### ---- -apiVersion: apps/v1 -kind: Deployment metadata: annotations: linkerd.io/created-by: linkerd/cli dev-undefined @@ -3405,6 +3191,9 @@ spec: runAsNonRoot: false runAsUser: 0 terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /run + name: linkerd-proxy-init-xtables-lock serviceAccountName: linkerd-prometheus volumes: - name: data @@ -3412,6 +3201,8 @@ spec: - configMap: name: linkerd-prometheus-config name: prometheus-config + - emptyDir: {} + name: linkerd-proxy-init-xtables-lock - emptyDir: medium: Memory name: linkerd-identity-end-entity diff --git a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden index 0558d2a9ad9cd..0bbe6270b4efe 100644 --- a/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden +++ b/cli/cmd/testdata/upgrade_overwrite_trust_anchors.golden @@ -1785,220 +1785,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - labels: - app.kubernetes.io/name: proxy-injector - app.kubernetes.io/part-of: Linkerd - app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - name: linkerd-proxy-injector - namespace: linkerd -spec: - replicas: 1 - selector: - matchLabels: - linkerd.io/control-plane-component: proxy-injector - template: - metadata: - annotations: - linkerd.io/created-by: linkerd/cli dev-undefined - linkerd.io/identity-mode: default - linkerd.io/proxy-version: UPGRADE-PROXY-VERSION - labels: - linkerd.io/control-plane-component: proxy-injector - linkerd.io/control-plane-ns: linkerd - linkerd.io/workload-ns: linkerd - linkerd.io/proxy-deployment: linkerd-proxy-injector - spec: - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - args: - - proxy-injector - - -log-level=info - image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /ping - port: 9995 - initialDelaySeconds: 10 - name: proxy-injector - ports: - - containerPort: 8443 - name: proxy-injector - - containerPort: 9995 - name: admin-http - readinessProbe: - failureThreshold: 7 - httpGet: - path: /-/ready - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - volumeMounts: - - mountPath: /data - name: data - - mountPath: /etc/prometheus/prometheus.yml - name: prometheus-config - subPath: prometheus.yml - readOnly: true - - env: - - name: LINKERD2_PROXY_LOG - value: warn,linkerd=info - - name: LINKERD2_PROXY_LOG_FORMAT - value: plain - - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR - value: linkerd-dst.linkerd.svc.cluster.local:8086 - - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS - value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" - - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR - value: 0.0.0.0:4190 - - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR - value: 0.0.0.0:4191 - - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR - value: 127.0.0.1:4140 - - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR - value: 0.0.0.0:4143 - - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES - value: svc.cluster.local. - - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES - value: svc.cluster.local. 
-        - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
-          value: 10000ms
-        - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
-          value: 10000ms
-        - name: _pod_ns
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: LINKERD2_PROXY_DESTINATION_CONTEXT
-          value: ns:$(_pod_ns)
-        - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY
-          value: "10000"
-        - name: LINKERD2_PROXY_IDENTITY_DIR
-          value: /var/run/linkerd/identity/end-entity
-        - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
-          value: |
-            -----BEGIN CERTIFICATE-----
-            MIIBYDCCAQegAwIBAgIBATAKBggqhkjOPQQDAjAYMRYwFAYDVQQDEw1jbHVzdGVy
-            LmxvY2FsMB4XDTE5MDMwMzAxNTk1MloXDTI5MDIyODAyMDM1MlowGDEWMBQGA1UE
-            AxMNY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAChpAt0
-            xtgO9qbVtEtDK80N6iCL2Htyf2kIv2m5QkJ1y0TFQi5hTVe3wtspJ8YpZF0pl364
-            6TiYeXB8tOOhIACjQjBAMA4GA1UdDwEB/wQEAwIBBjAdBgNVHSUEFjAUBggrBgEF
-            BQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNHADBE
-            AiBQ/AAwF8kG8VOmRSUTPakSSa/N4mqK2HsZuhQXCmiZHwIgZEzI5DCkpU7w3SIv
-            OLO4Zsk1XrGZHGsmyiEyvYF9lpY=
-            -----END CERTIFICATE-----
-        - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
-          value: /var/run/secrets/kubernetes.io/serviceaccount/token
-        - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
-          value: linkerd-identity.linkerd.svc.cluster.local:8080
-        - name: _pod_sa
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.serviceAccountName
-        - name: _l5d_ns
-          value: linkerd
-        - name: _l5d_trustdomain
-          value: cluster.local
-        - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
-          value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_IDENTITY_SVC_NAME
-          value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_DESTINATION_SVC_NAME
-          value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_TAP_SVC_NAME
-          value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION
-        imagePullPolicy: IfNotPresent
-        livenessProbe:
-          httpGet:
-            path: /live
-            port: 4191
-          initialDelaySeconds: 10
-        name: linkerd-proxy
-        ports:
-        - containerPort: 4143
-          name: linkerd-proxy
-        - containerPort: 4191
-          name: linkerd-admin
-        readinessProbe:
-          httpGet:
-            path: /ready
-            port: 4191
-          initialDelaySeconds: 2
-        resources:
-        securityContext:
-          allowPrivilegeEscalation: false
-          readOnlyRootFilesystem: true
-          runAsUser: 2102
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /var/run/linkerd/identity/end-entity
-          name: linkerd-identity-end-entity
-      initContainers:
-      - args:
-        - --incoming-proxy-port
-        - "4143"
-        - --outgoing-proxy-port
-        - "4140"
-        - --proxy-uid
-        - "2102"
-        - --inbound-ports-to-ignore
-        - 4190,4191
-        - --outbound-ports-to-ignore
-        - "443"
-        image: gcr.io/linkerd-io/proxy-init:v1.3.3
-        imagePullPolicy: IfNotPresent
-        name: linkerd-init
-        resources:
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-          requests:
-            cpu: "10m"
-            memory: "10Mi"
-        securityContext:
-          allowPrivilegeEscalation: false
-          capabilities:
-            add:
-            - NET_ADMIN
-            - NET_RAW
-          privileged: false
-          readOnlyRootFilesystem: true
-          runAsNonRoot: false
-          runAsUser: 0
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /run
-          name: linkerd-proxy-init-xtables-lock
-      serviceAccountName: linkerd-prometheus
-      volumes:
-      - name: data
-        emptyDir: {}
-      - configMap:
-          name: linkerd-prometheus-config
-        name: prometheus-config
-      - emptyDir: {}
-        name: linkerd-proxy-init-xtables-lock
-      - emptyDir:
-          medium: Memory
-        name: linkerd-identity-end-entity
----
-###
-### Proxy Injector
-###
----
-apiVersion: apps/v1
-kind: Deployment
 metadata:
   annotations:
     linkerd.io/created-by: linkerd/cli dev-undefined
@@ -3419,6 +3205,9 @@ spec:
           runAsNonRoot: false
           runAsUser: 0
         terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+        - mountPath: /run
+          name: linkerd-proxy-init-xtables-lock
       serviceAccountName: linkerd-prometheus
       volumes:
       - name: data
@@ -3426,6 +3215,8 @@ spec:
       - configMap:
           name: linkerd-prometheus-config
         name: prometheus-config
+      - emptyDir: {}
+        name: linkerd-proxy-init-xtables-lock
       - emptyDir:
           medium: Memory
         name: linkerd-identity-end-entity
diff --git a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden
index 5b758fe1cb733..c3055f4b133ae 100644
--- a/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden
+++ b/cli/cmd/testdata/upgrade_two_level_webhook_cert.golden
@@ -1793,222 +1793,6 @@ spec:
 ---
 apiVersion: apps/v1
 kind: Deployment
-metadata:
-  annotations:
-    linkerd.io/created-by: linkerd/cli dev-undefined
-  labels:
-    app.kubernetes.io/name: proxy-injector
-    app.kubernetes.io/part-of: Linkerd
-    app.kubernetes.io/version: UPGRADE-CONTROL-PLANE-VERSION
-    linkerd.io/control-plane-component: proxy-injector
-    linkerd.io/control-plane-ns: linkerd
-  name: linkerd-proxy-injector
-  namespace: linkerd
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      linkerd.io/control-plane-component: proxy-injector
-  template:
-    metadata:
-      annotations:
-        linkerd.io/created-by: linkerd/cli dev-undefined
-        linkerd.io/identity-mode: default
-        linkerd.io/proxy-version: UPGRADE-PROXY-VERSION
-      labels:
-        linkerd.io/control-plane-component: proxy-injector
-        linkerd.io/control-plane-ns: linkerd
-        linkerd.io/workload-ns: linkerd
-        linkerd.io/proxy-deployment: linkerd-proxy-injector
-    spec:
-      nodeSelector:
-        beta.kubernetes.io/os: linux
-      containers:
-      - args:
-        - proxy-injector
-        - -log-level=info
-        image: gcr.io/linkerd-io/controller:UPGRADE-CONTROL-PLANE-VERSION
-        imagePullPolicy: IfNotPresent
-        livenessProbe:
-          httpGet:
-            path: /ping
-            port: 9995
-          initialDelaySeconds: 10
-        name: proxy-injector
-        ports:
-        - containerPort: 8443
-          name: proxy-injector
-        - containerPort: 9995
-          name: admin-http
-        readinessProbe:
-          failureThreshold: 7
-          httpGet:
-            path: /-/ready
-            port: 9090
-          initialDelaySeconds: 30
-          timeoutSeconds: 30
-        securityContext:
-          runAsNonRoot: true
-          runAsUser: 65534
-          runAsGroup: 65534
-        volumeMounts:
-        - mountPath: /data
-          name: data
-        - mountPath: /etc/prometheus/prometheus.yml
-          name: prometheus-config
-          subPath: prometheus.yml
-          readOnly: true
-      - env:
-        - name: LINKERD2_PROXY_LOG
-          value: warn,linkerd=info
-        - name: LINKERD2_PROXY_LOG_FORMAT
-          value: plain
-        - name: LINKERD2_PROXY_DESTINATION_SVC_ADDR
-          value: linkerd-dst.linkerd.svc.cluster.local:8086
-        - name: LINKERD2_PROXY_DESTINATION_GET_NETWORKS
-          value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
-        - name: LINKERD2_PROXY_CONTROL_LISTEN_ADDR
-          value: 0.0.0.0:4190
-        - name: LINKERD2_PROXY_ADMIN_LISTEN_ADDR
-          value: 0.0.0.0:4191
-        - name: LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR
-          value: 127.0.0.1:4140
-        - name: LINKERD2_PROXY_INBOUND_LISTEN_ADDR
-          value: 0.0.0.0:4143
-        - name: LINKERD2_PROXY_DESTINATION_GET_SUFFIXES
-          value: svc.cluster.local.
-        - name: LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES
-          value: svc.cluster.local.
-        - name: LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE
-          value: 10000ms
-        - name: LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE
-          value: 10000ms
-        - name: _pod_ns
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: LINKERD2_PROXY_DESTINATION_CONTEXT
-          value: ns:$(_pod_ns)
-        - name: LINKERD2_PROXY_OUTBOUND_ROUTER_CAPACITY
-          value: "10000"
-        - name: LINKERD2_PROXY_IDENTITY_DIR
-          value: /var/run/linkerd/identity/end-entity
-        - name: LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS
-          value: |
-            -----BEGIN CERTIFICATE-----
-            MIIBwDCCAWagAwIBAgIQMvd1QnGUJzXVUt3gNh7rWjAKBggqhkjOPQQDAjApMScw
-            JQYDVQQDEx5pZGVudGl0eS5saW5rZXJkLmNsdXN0ZXIubG9jYWwwHhcNMjAwNDA2
-            MTAzOTUxWhcNMzAwNDA0MTAzOTUxWjApMScwJQYDVQQDEx5pZGVudGl0eS5saW5r
-            ZXJkLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQ19nmg
-            Q8l+EMofPxas7HUlOJE5avps6b6Q97Y71Waw3rdXYNCPqMxa4PedPc5VKGje6eqJ
-            Ao5mX29HeMcUw/y3o3AwbjAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB
-            /wIBATAdBgNVHQ4EFgQUfxv+BcCt5v7oF7PXJ9xY+JambdwwKQYDVR0RBCIwIIIe
-            aWRlbnRpdHkubGlua2VyZC5jbHVzdGVyLmxvY2FsMAoGCCqGSM49BAMCA0gAMEUC
-            IQCM8UfevR53SVGDd/4MgXMlVqC3Vh8oDiM0UToj2wsjNgIgLnZgogrqjK0KRo9R
-            SxZLbJKt6SJIIY9dw5gzQpUQR2U=
-            -----END CERTIFICATE-----
-        - name: LINKERD2_PROXY_IDENTITY_TOKEN_FILE
-          value: /var/run/secrets/kubernetes.io/serviceaccount/token
-        - name: LINKERD2_PROXY_IDENTITY_SVC_ADDR
-          value: linkerd-identity.linkerd.svc.cluster.local:8080
-        - name: _pod_sa
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.serviceAccountName
-        - name: _l5d_ns
-          value: linkerd
-        - name: _l5d_trustdomain
-          value: cluster.local
-        - name: LINKERD2_PROXY_IDENTITY_LOCAL_NAME
-          value: $(_pod_sa).$(_pod_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_IDENTITY_SVC_NAME
-          value: linkerd-identity.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_DESTINATION_SVC_NAME
-          value: linkerd-destination.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        - name: LINKERD2_PROXY_TAP_SVC_NAME
-          value: linkerd-tap.$(_l5d_ns).serviceaccount.identity.$(_l5d_ns).$(_l5d_trustdomain)
-        image: gcr.io/linkerd-io/proxy:UPGRADE-PROXY-VERSION
-        imagePullPolicy: IfNotPresent
-        livenessProbe:
-          httpGet:
-            path: /live
-            port: 4191
-          initialDelaySeconds: 10
-        name: linkerd-proxy
-        ports:
-        - containerPort: 4143
-          name: linkerd-proxy
-        - containerPort: 4191
-          name: linkerd-admin
-        readinessProbe:
-          httpGet:
-            path: /ready
-            port: 4191
-          initialDelaySeconds: 2
-        resources:
-        securityContext:
-          allowPrivilegeEscalation: false
-          readOnlyRootFilesystem: true
-          runAsUser: 2102
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /var/run/linkerd/identity/end-entity
-          name: linkerd-identity-end-entity
-      initContainers:
-      - args:
-        - --incoming-proxy-port
-        - "4143"
-        - --outgoing-proxy-port
-        - "4140"
-        - --proxy-uid
-        - "2102"
-        - --inbound-ports-to-ignore
-        - 4190,4191,2525-2527,2529
-        - --outbound-ports-to-ignore
-        - "443"
-        image: gcr.io/linkerd-io/proxy-init:v1.3.3
-        imagePullPolicy: IfNotPresent
-        name: linkerd-init
-        resources:
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-          requests:
-            cpu: "10m"
-            memory: "10Mi"
-        securityContext:
-          allowPrivilegeEscalation: false
-          capabilities:
-            add:
-            - NET_ADMIN
-            - NET_RAW
-          privileged: false
-          readOnlyRootFilesystem: true
-          runAsNonRoot: false
-          runAsUser: 0
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /run
-          name: linkerd-proxy-init-xtables-lock
-      serviceAccountName: linkerd-prometheus
-      volumes:
-      - name: data
-        emptyDir: {}
-      - configMap:
-          name: linkerd-prometheus-config
-        name: prometheus-config
-      - emptyDir: {}
-        name: linkerd-proxy-init-xtables-lock
-      - emptyDir:
-          medium: Memory
-        name: linkerd-identity-end-entity
----
-###
-### Proxy Injector
-###
----
-apiVersion: apps/v1
-kind: Deployment
 metadata:
   annotations:
     linkerd.io/created-by: linkerd/cli dev-undefined
@@ -3439,6 +3223,9 @@ spec:
           runAsNonRoot: false
           runAsUser: 0
         terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+        - mountPath: /run
+          name: linkerd-proxy-init-xtables-lock
       serviceAccountName: linkerd-prometheus
       volumes:
       - name: data
@@ -3446,6 +3233,8 @@ spec:
       - configMap:
           name: linkerd-prometheus-config
         name: prometheus-config
+      - emptyDir: {}
+        name: linkerd-proxy-init-xtables-lock
       - emptyDir:
           medium: Memory
         name: linkerd-identity-end-entity