diff --git a/charts/sentry/values.yaml b/charts/sentry/values.yaml
index b3a629848..756393980 100644
--- a/charts/sentry/values.yaml
+++ b/charts/sentry/values.yaml
@@ -76,7 +76,7 @@ vroom:
   service:
     annotations: {}
   # tolerations: []
-  # podLabels: []
+  # podLabels: {}

   autoscaling:
     enabled: false
@@ -114,7 +114,7 @@ relay:
   service:
     annotations: {}
   # tolerations: []
-  # podLabels: []
+  # podLabels: {}
   # priorityClassName: ""
   autoscaling:
     enabled: false
@@ -178,7 +178,7 @@ sentry:
     service:
       annotations: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # Mount and use custom CA
     # customCA:
     #   secretName: custom-ca
@@ -217,7 +217,7 @@ sentry:
     affinity: {}
     nodeSelector: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL
     # logFormat: "machine" # human|machine
     # excludeQueues: ""
@@ -249,7 +249,7 @@ sentry:
     affinity: {}
     nodeSelector: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -279,7 +279,7 @@ sentry:
     affinity: {}
     nodeSelector: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -309,7 +309,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # maxBatchSize: ""

     # it's better to use prometheus adapter and scale based on
@@ -344,7 +344,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # maxBatchSize: ""
     # logLevel: "info"
     # inputBlockSize: ""
@@ -382,7 +382,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # maxBatchSize: ""
     # logLevel: "info"
     # inputBlockSize: ""
@@ -419,7 +419,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -449,7 +449,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -479,7 +479,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -510,7 +510,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -540,7 +540,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     # it's better to use prometheus adapter and scale based on
     # the size of the rabbitmq queue
@@ -572,7 +572,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # maxPollIntervalMs: ""
     # logLevel: "info"

@@ -606,7 +606,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # logLevel: "info"
     # maxPollIntervalMs: ""

@@ -637,7 +637,7 @@ sentry:
     affinity: {}
     nodeSelector: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -655,7 +655,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -675,7 +675,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -692,7 +692,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -714,7 +714,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -735,7 +735,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -755,7 +755,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -775,7 +775,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -795,7 +795,7 @@ sentry:
     securityContext: {}
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     sidecars: []
     topologySpreadConstraints: []
     volumes: []
@@ -846,7 +846,7 @@ snuba:
     service:
       annotations: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}

     autoscaling:
       enabled: false
@@ -869,7 +869,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -903,7 +903,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     # noStrictOffsetReset: false
     maxBatchSize: "3"
@@ -937,7 +937,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     # noStrictOffsetReset: false
     maxBatchSize: "3"
@@ -971,7 +971,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     # maxBatchTimeMs: ""
     # queuedMaxMessagesKbytes: ""
@@ -990,7 +990,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1018,7 +1018,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1038,7 +1038,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1066,7 +1066,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1094,7 +1094,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1122,7 +1122,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1142,7 +1142,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     # volumes: []
     # volumeMounts: []
@@ -1161,7 +1161,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # commitBatchSize: 1
     autoOffsetReset: "earliest"
     sidecars: []
@@ -1180,7 +1180,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1213,7 +1213,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     # noStrictOffsetReset: false
     # maxBatchSize: ""
@@ -1243,7 +1243,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1277,7 +1277,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1310,7 +1310,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1343,7 +1343,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1377,7 +1377,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1411,7 +1411,7 @@ snuba:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     autoOffsetReset: "earliest"
     livenessProbe:
       enabled: true
@@ -1459,7 +1459,7 @@ hooks:
       # pullPolicy: IfNotPresent
     imagePullSecrets: []
     env: []
-    # podLabels: []
+    # podLabels: {}
     podAnnotations: {}
     resources:
       limits:
@@ -1477,7 +1477,7 @@ hooks:
   dbInit:
     enabled: true
     env: []
-    # podLabels: []
+    # podLabels: {}
     podAnnotations: {}
     resources:
       limits:
@@ -1499,7 +1499,7 @@ hooks:
     # Note that when you set `kafka.enabled` to `false`, snuba component might fail to start if newly added topics are not created by `kafka.provisioning`.
     kafka:
       enabled: true
-    # podLabels: []
+    # podLabels: {}
     podAnnotations: {}
     resources:
       limits:
@@ -1515,7 +1515,7 @@ hooks:
     # volumeMounts: []
   snubaMigrate:
     enabled: true
-    # podLabels: []
+    # podLabels: {}
     # volumes: []
     # volumeMounts: []

@@ -1555,7 +1555,7 @@ symbolicator:
     topologySpreadConstraints: []
     containerSecurityContext: {}
     # tolerations: []
-    # podLabels: []
+    # podLabels: {}
     # priorityClassName: "xxx"
     config: |-
       # See: https://getsentry.github.io/symbolicator/#configuration
@@ -1593,7 +1593,7 @@ symbolicator:
   # TODO The cleanup cronjob is not yet implemented
   cleanup:
     enabled: false
-    # podLabels: []
+    # podLabels: {}
     # affinity: {}
     # env: []

@@ -2201,7 +2201,7 @@ metrics:
   containerSecurityContext: {}
   # schedulerName:
   # Optional extra labels for pod, i.e. redis-client: "true"
-  # podLabels: []
+  # podLabels: {}
   service:
     type: ClusterIP
     labels: {}
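Context for the change: `podLabels` is a map of label key/value pairs that gets merged into the pod template's labels, so the commented placeholder should read `{}` (empty map) rather than `[]` (empty list). A minimal sketch of an override that uncomments it for one component, assuming hypothetical label keys (the `team` and component values below are examples, not chart defaults):

# values-override.yaml -- hypothetical example
sentry:
  worker:
    # podLabels must be a mapping, not a list; the chart renders it into the
    # pod's labels.
    podLabels:
      app.kubernetes.io/component: sentry-worker  # example value
      team: platform                               # example value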