
Merge branch 'master' into alibaba-doc
aylei authored May 29, 2019
2 parents 691c195 + 3a06f1e commit 670c641
Showing 67 changed files with 3,087 additions and 712 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -9,6 +9,7 @@ tests/images/fault-trigger/bin/
tests/images/e2e/tidb-cluster/
tests/images/e2e/tidb-backup/
tests/images/e2e/tidb-operator/
tests/images/e2e/manifests/
*.tar
tmp/
data/
7 changes: 6 additions & 1 deletion Makefile
@@ -28,7 +28,7 @@ docker-push: docker
docker: build
docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-operator:latest" images/tidb-operator

build: controller-manager scheduler discovery
build: controller-manager scheduler discovery admission-controller

controller-manager:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-controller-manager cmd/controller-manager/main.go
@@ -39,6 +39,9 @@ scheduler:
discovery:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-discovery cmd/discovery/main.go

admission-controller:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-admission-controller cmd/admission-controller/main.go

e2e-setup:
# ginkgo doesn't work with retool for Go 1.11
@GO111MODULE=on CGO_ENABLED=0 go get github.com/onsi/ginkgo@v1.6.0
@@ -50,9 +53,11 @@ e2e-docker: e2e-build
[ -d tests/images/e2e/tidb-operator ] && rm -r tests/images/e2e/tidb-operator || true
[ -d tests/images/e2e/tidb-cluster ] && rm -r tests/images/e2e/tidb-cluster || true
[ -d tests/images/e2e/tidb-backup ] && rm -r tests/images/e2e/tidb-backup || true
[ -d tests/images/e2e/manifests ] && rm -r tests/images/e2e/manifests || true
cp -r charts/tidb-operator tests/images/e2e
cp -r charts/tidb-cluster tests/images/e2e
cp -r charts/tidb-backup tests/images/e2e
cp -r manifests tests/images/e2e
docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest" tests/images/e2e

e2e-build: e2e-setup
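The new admission-controller target mirrors the existing controller-manager, scheduler, and discovery targets, and the default build target now produces its binary as well. A minimal usage sketch (the targets and output path come from the Makefile above; running from the repository root is an assumption):

    # build only the admission controller binary
    make admission-controller
    # or build everything, which now includes it
    make build
    ls images/tidb-operator/bin/tidb-admission-controller
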
10 changes: 5 additions & 5 deletions charts/tidb-backup/templates/scripts/_start_restore.sh.tpl
@@ -22,9 +22,9 @@ downloader \
{{- end }}

/loader \
-d ${dirname} \
-h `eval echo '${'$host'}'` \
-u ${TIDB_USER} \
-p ${TIDB_PASSWORD} \
-P 4000 \
-d=${dirname} \
-h=`eval echo '${'$host'}'` \
-u=${TIDB_USER} \
-p=${TIDB_PASSWORD} \
-P=4000 \
{{ .Values.restoreOptions }}
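The loader flags change from the space-separated form (-p ${TIDB_PASSWORD}) to the key=value form (-p=${TIDB_PASSWORD}). One plausible motivation, not stated in the commit, is that an unquoted empty variable vanishes entirely under word splitting in the space-separated form, while the key=value form still passes an explicit (empty) value. A minimal shell sketch of the difference, using hypothetical values:

    TIDB_PASSWORD=""
    dirname="/data"
    # space-separated form: the empty expansion is dropped, leaving -p with no value
    echo loader -d ${dirname} -p ${TIDB_PASSWORD} -P 4000    # -> loader -d /data -p -P 4000
    # key=value form: an explicit empty value is still passed
    echo loader -d=${dirname} -p=${TIDB_PASSWORD} -P=4000    # -> loader -d=/data -p= -P=4000
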
9 changes: 5 additions & 4 deletions charts/tidb-backup/values.yaml
@@ -10,14 +10,14 @@ mode: backup # backup | restore
name: fullbackup-20190306
image:
pullPolicy: IfNotPresent
binlog: pingcap/tidb-binlog:v2.1.8
binlog: pingcap/tidb-binlog:v3.0.0-rc.1
# https://github.com/tennix/tidb-cloud-backup
backup: pingcap/tidb-cloud-backup:latest

# secretName is the name of the secret which stores user and password used for backup/restore
# Note: you must give the user enough privilege to do the backup and restore
# you can create the secret by:
# kubectl create secret generic backup-secret --from-literal=user=root --from-literal=password=<password>
# kubectl create secret generic backup-secret --namespace=<namespace> --from-literal=user=root --from-literal=password=<password>
secretName: backup-secret

storage:
@@ -39,7 +39,8 @@ gcp: {}
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
# https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually
# And then create the secret by: kubectl create secret generic gcp-backup-secret --from-file=./credentials.json
# And then create the secret by:
# kubectl create secret generic gcp-backup-secret --namespace=<namespace> --from-file=./credentials.json
# secretName: gcp-backup-secret

# backup to or restore from ceph bucket, the backup path is in the form of <clusterName>-<name>
@@ -48,5 +49,5 @@ ceph: {}
# bucket: ""
# secretName is the name of the secret which stores ceph object store access key and secret key
# You can create the secret by:
# kubectl create secret generic ceph-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# kubectl create secret generic ceph-backup-secret --namespace=<namespace> --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# secretName: ceph-backup-secret
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/config/_pd-config.tpl
@@ -82,7 +82,7 @@ max-replicas = {{ .Values.pd.maxReplicas }}
# The placement priorities is implied by the order of label keys.
# For example, ["zone", "rack"] means that we should place replicas to
# different zones first, then to different racks if we don't have enough zones.
location-labels = ["zone", "rack", "host"]
location-labels = ["region", "zone", "rack", "host"]

[label-property]
# Do not assign region leaders to stores that have these tags.
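Adding "region" ahead of "zone", "rack", and "host" tells PD to spread replicas across regions first, then zones, racks, and hosts. For these labels to take effect, the TiKV stores need matching location labels, which on Kubernetes are typically derived from the labels of the node running each TiKV pod. A hedged example of labeling a node to match the new hierarchy (the node name and rack value are placeholders; the cn-bj1 values follow the comments in charts/tidb-cluster/values.yaml below):

    kubectl label node <node-name> region=cn-bj1 zone=cn-bj1-01 rack=rack-1 host=<node-name>
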
3 changes: 3 additions & 0 deletions charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl
@@ -9,6 +9,9 @@
#

set -uo pipefail

{{ .Values.pd.preStartScript }}

ANNOTATIONS="/etc/podinfo/annotations"

if [[ ! -f "${ANNOTATIONS}" ]]
3 changes: 3 additions & 0 deletions charts/tidb-cluster/templates/scripts/_start_tidb.sh.tpl
@@ -8,6 +8,9 @@
# runmode="normal/debug"
#
set -uo pipefail

{{ .Values.tidb.preStartScript }}

ANNOTATIONS="/etc/podinfo/annotations"

if [[ ! -f "${ANNOTATIONS}" ]]
3 changes: 3 additions & 0 deletions charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl
@@ -9,6 +9,9 @@
#

set -uo pipefail

{{ .Values.tikv.preStartScript }}

ANNOTATIONS="/etc/podinfo/annotations"

if [[ ! -f "${ANNOTATIONS}" ]]
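Each of the pd, tidb, and tikv start scripts now renders a {{ .Values.<component>.preStartScript }} hook before the existing startup logic, so arbitrary shell can run in the container before the server process starts. A hedged values.yaml sketch (the key names are taken from the templates above; the hook contents are hypothetical, and the chart defaults are not shown in this diff):

    pd:
      preStartScript: |
        # hypothetical example: log a marker before pd-server launches
        echo "running pd pre-start hook"
    tidb:
      preStartScript: ""
    tikv:
      preStartScript: ""
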
15 changes: 6 additions & 9 deletions charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -33,11 +33,10 @@ spec:
{{- if .Values.pd.resources }}
{{ toYaml .Values.pd.resources | indent 4 }}
{{- end }}
{{- if .Values.pd.nodeSelector }}
affinity:
{{ toYaml .Values.pd.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.pd.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.pd.tolerations }}
tolerations:
{{ toYaml .Values.pd.tolerations | indent 4 }}
@@ -56,11 +55,10 @@ spec:
{{- if .Values.tikv.resources }}
{{ toYaml .Values.tikv.resources | indent 4 }}
{{- end }}
{{- if .Values.tikv.nodeSelector }}
affinity:
{{ toYaml .Values.tikv.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.tikv.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.tikv.tolerations }}
tolerations:
{{ toYaml .Values.tikv.tolerations | indent 4 }}
@@ -76,11 +74,10 @@ spec:
{{- if .Values.tidb.resources }}
{{ toYaml .Values.tidb.resources | indent 4 }}
{{- end }}
{{- if .Values.tidb.nodeSelector }}
affinity:
{{ toYaml .Values.tidb.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.tidb.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.tidb.tolerations }}
tolerations:
{{ toYaml .Values.tidb.tolerations | indent 4 }}
115 changes: 94 additions & 21 deletions charts/tidb-cluster/values.yaml
@@ -49,7 +49,7 @@ enableConfigMapRollout: false

pd:
replicas: 3
image: pingcap/pd:v2.1.8
image: pingcap/pd:v3.0.0-rc.1
logLevel: info
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
# different classes might map to quality-of-service levels, or to backup policies,
@@ -73,16 +73,72 @@ pd:
# cpu: 4000m
# memory: 4Gi
storage: 1Gi
# nodeSelector is used for scheduling pod,
# if nodeSelectorRequired is true, all the following labels must be matched

## affinity defines pd scheduling rules; its default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## The following is a typical example of affinity settings:
## The PodAntiAffinity setting in the example keeps PD pods from co-locating on the same topology node as far as possible, to improve the disaster tolerance of PD on Kubernetes.
## The NodeAffinity setting in the example ensures that PD pods can only be scheduled to nodes with the label kind="pd".
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# # this term works when the nodes have a label named region
# - weight: 10
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "region"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named zone
# - weight: 20
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "zone"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named rack
# - weight: 40
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "rack"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named kubernetes.io/hostname
# - weight: 80
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "kubernetes.io/hostname"
# namespaces:
# - <helm namespace>
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "kind"
# operator: In
# values:
# - "pd"

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: pd
# # zone is comma separated availability zone list
# zone: cn-bj1-01,cn-bj1-02
# # region is comma separated region list
# region: cn-bj1
# Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
@@ -92,7 +148,7 @@ pd:

tikv:
replicas: 3
image: pingcap/tikv:v2.1.8
image: pingcap/tikv:v3.0.0-rc.1
logLevel: info
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
# different classes might map to quality-of-service levels, or to backup policies,
@@ -117,10 +173,18 @@ tikv:
# cpu: 12000m
# memory: 24Gi
storage: 10Gi

## affinity defines tikv scheduling rules; its default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: tikv
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
@@ -155,7 +219,7 @@ tidb:
# initSql is the SQL statements executed after the TiDB cluster is bootstrapped.
# initSql: |-
# create database app;
image: pingcap/tidb:v2.1.8
image: pingcap/tidb:v3.0.0-rc.1
# Image pull policy.
imagePullPolicy: IfNotPresent
logLevel: info
@@ -196,10 +260,19 @@ tidb:
requests: {}
# cpu: 12000m
# memory: 12Gi


## affinity defines tidb scheduling rules; its default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: tidb
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
@@ -301,7 +374,7 @@ binlog:
pump:
create: false
replicas: 1
image: pingcap/tidb-binlog:v2.1.8
image: pingcap/tidb-binlog:v3.0.0-rc.1
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -319,7 +392,7 @@ binlog:

drainer:
create: false
image: pingcap/tidb-binlog:v2.1.8
image: pingcap/tidb-binlog:v3.0.0-rc.1
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
Expand Down Expand Up @@ -364,7 +437,7 @@ binlog:

scheduledBackup:
create: false
binlogImage: pingcap/tidb-binlog:v2.1.8
binlogImage: pingcap/tidb-binlog:v3.0.0-rc.1
binlogImagePullPolicy: IfNotPresent
# https://github.com/tennix/tidb-cloud-backup
mydumperImage: pingcap/tidb-cloud-backup:latest
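To pick up the new defaults above (v3.0.0-rc.1 images, the affinity and nodeSelector settings, and the rest), the chart is deployed as usual; a hedged example with placeholder release and namespace names and a hypothetical overrides file my-values.yaml (Helm 2 syntax, matching the era of this commit):

    # fresh install
    helm install charts/tidb-cluster --name=<release-name> --namespace=<namespace> -f my-values.yaml
    # or upgrade an existing release
    helm upgrade <release-name> charts/tidb-cluster -f my-values.yaml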