# Default values for tidb-cluster.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Also see monitor.serviceAccount
# If you set rbac.create to false, you need to provide a value for monitor.serviceAccount
rbac:
create: true
# Set to true to enable cross-namespace monitoring
crossNamespace: false
# clusterName is the TiDB cluster name; if not specified, the chart release name will be used
# clusterName: demo
# Add extra labels to TidbCluster object
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
extraLabels: {}
# schedulerName must be the same as charts/tidb-operator/values#scheduler.schedulerName
schedulerName: tidb-scheduler
# timezone is the default system timezone for TiDB
timezone: UTC
# default reclaim policy of a PV
pvReclaimPolicy: Retain
# services is the list of services to expose; the default type is ClusterIP.
# Valid types are ClusterIP | NodePort | LoadBalancer (see the commented example after the list below).
services:
- name: pd
type: ClusterIP
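# An illustrative, commented-out variant (hedged: whether and how a LoadBalancer is provisioned
# depends on your cloud provider) that would expose the PD service through a cloud load balancer instead:
# services:
#   - name: pd
#     type: LoadBalancer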
discovery:
image: pingcap/tidb-operator:v1.0.1
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 250m
memory: 150Mi
requests:
cpu: 80m
memory: 50Mi
# Whether to enable ConfigMap rollout management.
# When enabled, a change to the ConfigMap triggers a graceful rolling update of the component.
# This feature is only available in tidb-operator v1.0 or higher.
# Note: switching this variable on an existing cluster will cause a rolling update of each component even
# if the ConfigMap has not changed.
enableConfigMapRollout: true
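# A minimal sketch of how a config change is usually rolled out with this chart
# (hedged: "my-cluster" and the "pingcap" Helm repo alias are assumptions, not values from this file):
#   helm upgrade my-cluster pingcap/tidb-cluster -f values.yaml
# With enableConfigMapRollout set to true, the resulting ConfigMap change then triggers a graceful rolling update.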
# Whether to enable TLS connections between server nodes.
# When enabled, PD/TiDB/TiKV will use TLS-encrypted connections to transfer data between nodes;
# certificates will be generated automatically (if not already present).
enableTLSCluster: false
pd:
# Please refer to https://github.com/pingcap/pd/blob/master/conf/config.toml for the default
# pd configuration (switch to the tag matching your pd version).
# Follow the format in that file and configure the 'config' section
# below if you want to customize any configuration (a commented illustration follows the config block below).
# Please refer to https://pingcap.com/docs-cn/v3.0/reference/configuration/pd-server/configuration-file/
# (choose the version matching your pd) for a detailed explanation of each parameter.
config: |
[log]
level = "info"
[replication]
location-labels = ["region", "zone", "rack", "host"]
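# An illustrative, commented-out customization of the PD config above (hedged: the parameter names
# come from the upstream config.toml linked above -- verify them against your PD version):
#   [schedule]
#   max-store-down-time = "30m"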
replicas: 3
image: pingcap/pd:v3.0.1
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, to backup policies,
# or to arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests:
# cpu: 4000m
# memory: 4Gi
storage: 1Gi
## affinity defines pd scheduling rules; the default setting is empty.
## Please read the affinity documentation before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## The following is a typical example of affinity settings:
## The PodAntiAffinity setting keeps PD pods from co-locating on the same topology node as far as possible, improving the disaster tolerance of PD on Kubernetes.
## The NodeAffinity setting ensures that PD pods can only be scheduled to nodes with the label kind="pd".
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# # this term works when the nodes have the label named region
# - weight: 10
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "region"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named zone
# - weight: 20
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "zone"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named rack
# - weight: 40
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "rack"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named kubernetes.io/hostname
# - weight: 80
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "kubernetes.io/hostname"
# namespaces:
# - <helm namespace>
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "kind"
# operator: In
# values:
# - "pd"
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of PD Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
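# An illustrative, commented-out podSecurityContext (hedged: fsGroup is a standard Kubernetes
# PodSecurityContext field, and 2000 is an arbitrary example value, not a chart default):
# podSecurityContext:
#   fsGroup: 2000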
# Specify the priorityClassName for PD Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
tikv:
# Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default
# tikv configuration (switch to the tag matching your tikv version).
# Follow the format in that file and configure the 'config' section
# below if you want to customize any configuration.
# Please refer to https://pingcap.com/docs-cn/v3.0/reference/configuration/tikv-server/configuration-file/
# (choose the version matching your tikv) for a detailed explanation of each parameter.
config: |
log-level = "info"
# # Here are some parameters you MUST customize (Please configure in the above `tikv.config` section):
#
# [readpool.coprocessor]
# # Normally these three parameters should be tuned to 80% of `tikv.resources.limits.cpu`, for example: 10000m -> 8
# high-concurrency = 8
# normal-concurrency = 8
# low-concurrency = 8
#
# # For TiKV v2.x:
# [rocksdb.defaultcf]
# ## block-cache is used to cache uncompressed blocks; a big block-cache can speed up reads.
# ## In normal cases it should be tuned to 30%-50% of `tikv.resources.limits.memory`
# # block-cache-size = "1GB"
#
# [rocksdb.writecf]
# ## In normal cases it should be tuned to 10%-30% of `tikv.resources.limits.memory`
# # block-cache-size = "256MB"
#
# # From TiKV v3.0.0 on, you do not need to configure
# # [rocksdb.defaultcf].block-cache-size and [rocksdb.writecf].block-cache-size.
# # Instead, configure [storage.block-cache] as below:
# [storage.block-cache]
# shared = true
#
# # Normally it should be tuned to 30%-50% of `tikv.resources.limits.memory`, for example: 32Gi -> 16GB
# capacity = "1GB"
# Note that we can't set raftstore.capacity in config because it will be overridden by the command line parameter,
# we can only set capacity in tikv.resources.limits.storage.
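# # A worked illustration (hedged) of the ratios above, for tikv.resources.limits of cpu: 20000m and memory: 32Gi:
# #   readpool.coprocessor high/normal/low-concurrency ~= 20 * 0.8 = 16
# #   [storage.block-cache] capacity ~= 50% of 32Gi = "16GB"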
replicas: 3
image: pingcap/tikv:v3.0.1
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, to backup policies,
# or to arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 16000m
# memory: 32Gi
# storage: 300Gi # We can set capacity here.
requests:
# cpu: 12000m
# memory: 24Gi
storage: 10Gi
## affinity defines tikv scheduling rules; the default setting is empty.
## Please read the affinity documentation before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of TiKV Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
# Specify the priorityClassName for TiKV Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
# tidb configuration (switch to the tag matching your tidb version).
# Follow the format in that file and configure the 'config' section
# below if you want to customize any configuration.
# Please refer to https://pingcap.com/docs-cn/v3.0/reference/configuration/tidb-server/configuration-file/
# (choose the version matching your tidb) for a detailed explanation of each parameter.
config: |
[log]
level = "info"
# # Here are some parameters you MUST customize (Please configure in the above 'tidb.config' section):
# [performance]
# # Normally it should be tuned to `tidb.resources.limits.cpu`, for example: 16000m -> 16
# max-procs = 0
replicas: 2
# The name of the secret containing the root password; you can create the secret with the following command:
# kubectl create secret generic tidb-secret --from-literal=root=<root-password> --namespace=<namespace>
# If unset, the root password will be empty and you can set it after connecting
# passwordSecretName: tidb-secret
# permitHost is the only host allowed to connect to TiDB.
# If unset, it defaults to '%', which allows any host to connect to TiDB.
# permitHost: 127.0.0.1
# initSql contains SQL statements to execute after the TiDB cluster is bootstrapped.
# The ConfigMap key (file name) must be init-sql with no file extension; you can create the ConfigMap with the following command:
# kubectl create configmap tidb-initsql --from-file=init-sql=/path/to/your/init-sql --namespace=<namespace>
# If both initSqlConfigMapName and initSql are set, initSqlConfigMapName takes precedence.
# initSqlConfigMapName: tidb-initsql
# initSql: |-
# create database app;
image: pingcap/tidb:v3.0.1
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 16000m
# memory: 16Gi
requests: {}
# cpu: 12000m
# memory: 12Gi
## affinity defines tidb scheduling rules; the default setting is empty.
## Please read the affinity documentation before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of TiDB Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
# Specify the priorityClassName for TiDB Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
maxFailoverCount: 3
service:
type: NodePort
exposeStatus: true
# annotations:
# cloud.google.com/load-balancer-type: Internal
separateSlowLog: true
slowLogTailer:
image: busybox:1.26.2
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 20m
memory: 5Mi
initializer:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
# tidb plugin configuration
plugin:
# whether to enable plugins
enable: false
# the start argument to specify the folder containing plugins
directory: /plugins
# the start argument to specify the plugin id (name "-" version) that needs to be loaded, e.g. 'conn_limit-1'.
list: ["whitelist-1"]
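# An illustrative, commented-out example of loading two plugins at startup
# (hedged: which plugins are actually available depends on your TiDB build; 'conn_limit-1' is
# only the example id mentioned in the comment above):
# plugin:
#   enable: true
#   directory: /plugins
#   list: ["whitelist-1", "conn_limit-1"]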
# Whether to enable TLS connections between the TiDB server and MySQL clients.
# When enabled, TiDB will accept TLS-encrypted connections from MySQL clients; certificates will be generated
# automatically.
# Note: TLS is not forced on the server side; plain connections are still accepted after enabling.
enableTLSClient: false
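# A minimal sketch of connecting over TLS once this is enabled (hedged: the host name is a placeholder,
# --ssl-mode is a MySQL 5.7+ client option, and 4000 is TiDB's default client port):
#   mysql -h <tidb-service-host> -P 4000 -u root --ssl-mode=REQUIRED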
# mysqlClient is used to set the password for TiDB;
# the image must have the Python MySQL client installed
mysqlClient:
image: tnir/mysqlclient
imagePullPolicy: IfNotPresent
monitor:
create: true
# Also see rbac.create
# If you set rbac.create to false, you need to provide a value here.
# If you set rbac.create to true, you should leave this empty.
# serviceAccount:
persistent: false
storageClassName: local-storage
storage: 10Gi
initializer:
image: pingcap/tidb-monitor-initializer:v3.0.1
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 50m
# memory: 64Mi
# requests:
# cpu: 50m
# memory: 64Mi
reloader:
create: true
image: pingcap/tidb-monitor-reloader:v1.0.0
imagePullPolicy: IfNotPresent
service:
type: NodePort
resources: {}
# limits:
# cpu: 50m
# memory: 64Mi
# requests:
# cpu: 50m
# memory: 64Mi
grafana:
create: true
image: grafana/grafana:6.0.1
imagePullPolicy: IfNotPresent
logLevel: info
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
username: admin
password: admin
config:
# Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
# Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
# if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
# GF_SERVER_DOMAIN: foo.bar
# GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
service:
type: NodePort
prometheus:
image: prom/prometheus:v2.11.1
imagePullPolicy: IfNotPresent
logLevel: info
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
service:
type: NodePort
reserveDays: 12
# alertmanagerURL: ""
nodeSelector: {}
# kind: monitor
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
binlog:
pump:
create: false
replicas: 1
image: pingcap/tidb-binlog:v3.0.1
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, to backup policies,
# or to arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 20Gi
# affinity for pump pod assignment, default: empty
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
syncLog: true
# an integer value that controls the retention of binlog data, indicating how long (in days) the binlog data is stored.
# Must be bigger than 0.
gc: 7
# interval in seconds between heartbeat ticks (default: 2)
heartbeatInterval: 2
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
# Please refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/pump/pump.toml for the default
# pump configuration (switch to the tag matching your pump version).
# Follow the format in that file and configure the 'config' section
# below if you want to customize any configuration.
# config: |
# gc = 7
# heartbeat-interval = 2
# [storage]
# sync-log = true
# stop-write-at-available-space = "10Gi"
drainer:
create: false
image: pingcap/tidb-binlog:v3.0.1
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, to backup policies,
# or to arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 10Gi
# affinity for drainer pod assignment, default: empty
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# the concurrency of writes to the downstream during synchronization; the bigger the value,
# the better the throughput (16 by default)
workerCount: 16
# the interval (in seconds) at which the pumps' status is checked (default 10)
detectInterval: 10
# disable causality detection
disableDetect: false
# disable dispatching SQL statements that are in the same binlog; if set to true, worker-count and txn-batch are ignored
disableDispatch: false
# do not sync these schemas
ignoreSchemas: "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test"
# if drainer doesn't have a checkpoint, use initialCommitTs as the initial checkpoint
initialCommitTs: 0
# enable safe mode to make syncer reentrant
safeMode: false
# the number of SQL statements of a transaction that are output to the downstream database (20 by default)
txnBatch: 20
# downstream storage, equal to --dest-db-type
# valid values are "mysql", "file", "tidb", "kafka"
destDBType: file
mysql: {}
# host: "127.0.0.1"
# user: "root"
# password: ""
# port: 3306
# # Time and size limits for flush batch write
# timeLimit: "30s"
# sizeLimit: "100000"
kafka: {}
# only one of zookeeperAddrs and kafkaAddrs needs to be configured; the Kafka address will be discovered if zookeeperAddrs is configured.
# zookeeperAddrs: "127.0.0.1:2181"
# kafkaAddrs: "127.0.0.1:9092"
# kafkaVersion: "0.8.2.0"
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
# Please refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/drainer/drainer.toml for the default
# drainer configuration (switch to the tag matching your drainer version).
# Follow the format in that file and configure the 'config' section
# below if you want to customize any configuration.
# config: |
# worker-count = 16
# detect-interval = 10
# disable-dispatch = false
# ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
# safe-mode = false
# txn-batch = 20
# db-type = "mysql"
# [syncer.to]
# # host = "127.0.0.1"
# # user = "root"
# # password = ""
# # port = 3306
scheduledBackup:
create: false
# https://github.com/pingcap/tidb-cloud-backup
mydumperImage: pingcap/tidb-cloud-backup:20190828
mydumperImagePullPolicy: IfNotPresent
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, to backup policies,
# or to arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 100Gi
# When set to true, cleans up the backup data on storage upon successful upload to the cloud object storage
cleanupAfterUpload: false
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule
schedule: "0 0 * * *"
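# e.g. the default "0 0 * * *" runs daily at midnight; "30 2 * * 6" would run at 02:30 every Saturday
# (an illustrative alternative in standard cron syntax).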
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#suspend
suspend: false
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#jobs-history-limits
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#starting-deadline
startingDeadlineSeconds: 3600
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
backoffLimit: 6
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-termination-and-cleanup
# activeDeadlineSeconds: 10800
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#handling-pod-and-container-failures
restartPolicy: OnFailure
# -t is the thread count; a larger thread count speeds up the backup but may impact the performance of the upstream TiDB.
# -F is the chunk size; a big table is partitioned into many chunks.
# Other useful options are -B for database, and -T for tables.
# See https://github.com/maxbube/mydumper/blob/master/docs/mydumper_usage.rst#options for more options.
options: "-t 16 -F 256 --skip-tz-utc --verbose=3"
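# An illustrative, commented-out variant (hedged: "app" is a hypothetical database name) that
# limits the backup to a single database via -B:
# options: "-t 16 -F 256 --skip-tz-utc --verbose=3 -B app"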
# The time limit during which data is retained for each GC during the backup, in Go Duration format.
# When a GC happens, the current time minus this value is the safe point.
tikvGCLifeTime: 720h
# secretName is the name of the secret which stores the user and password used for the backup
# Note: you must give the user enough privileges to perform the backup
# you can create the secret by:
# kubectl create secret generic backup-secret --from-literal=user=root --from-literal=password=<password>
secretName: backup-secret
# backup to gcp
gcp: {}
# bucket: ""
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
# https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually
# And then create the secret by: kubectl create secret generic gcp-backup-secret --from-file=./credentials.json
# secretName: gcp-backup-secret
# backup to ceph object storage
ceph: {}
# endpoint: ""
# bucket: ""
# secretName is the name of the secret which stores ceph object store access key and secret key
# You can create the secret by:
# kubectl create secret generic ceph-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# secretName: ceph-backup-secret
# backup to s3
s3: {}
# region: ""
# bucket: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# You can create the secret by:
# kubectl create secret generic s3-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# secretName: s3-backup-secret
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
importer:
create: false
image: pingcap/tidb-lightning:v3.0.1
imagePullPolicy: IfNotPresent
storageClassName: local-storage
storage: 200Gi
resources: {}
# limits:
# cpu: 16000m
# memory: 8Gi
# requests:
# cpu: 16000m
# memory: 8Gi
affinity: {}
tolerations: []
pushgatewayImage: prom/pushgateway:v0.3.1
pushgatewayImagePullPolicy: IfNotPresent
config: |
log-level = "info"
[metrics]
job = "tikv-importer"
interval = "15s"
address = "localhost:9091"
metaInstance: "{{ $labels.instance }}"
metaType: "{{ $labels.type }}"
metaValue: "{{ $value }}"