Automated cherry pick of #915: Support configuring sysctls for Pods and enable net.* #954

Merged
6 changes: 6 additions & 0 deletions charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -46,6 +46,8 @@ spec:
{{ toYaml .Values.pd.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.pd.hostNetwork }}
podSecurityContext:
{{ toYaml .Values.pd.podSecurityContext | indent 6}}
tikv:
replicas: {{ .Values.tikv.replicas }}
image: {{ .Values.tikv.image }}
@@ -69,6 +71,8 @@
{{ toYaml .Values.tikv.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.tikv.hostNetwork }}
podSecurityContext:
{{ toYaml .Values.tikv.podSecurityContext | indent 6}}
tidb:
replicas: {{ .Values.tidb.replicas }}
image: {{ .Values.tidb.image }}
@@ -89,6 +93,8 @@
{{ toYaml .Values.tidb.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.tidb.hostNetwork }}
podSecurityContext:
{{ toYaml .Values.tidb.podSecurityContext | indent 6}}
binlogEnabled: {{ .Values.binlog.pump.create | default false }}
maxFailoverCount: {{ .Values.tidb.maxFailoverCount | default 3 }}
separateSlowLog: {{ .Values.tidb.separateSlowLog | default false }}
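
A quick sketch of what these template additions do: toYaml | indent 6 copies whatever map is set under pd.podSecurityContext (and likewise tikv.podSecurityContext and tidb.podSecurityContext) verbatim into the rendered TidbCluster spec. Assuming a hypothetical values override for PD (the sysctl name and value below are illustrative, not part of this PR), the rendering would look roughly like this:

# hypothetical values override
pd:
  podSecurityContext:
    sysctls:
    - name: net.core.somaxconn
      value: "32768"

# resulting fragment of the rendered TidbCluster manifest
spec:
  pd:
    podSecurityContext:
      sysctls:
      - name: net.core.somaxconn
        value: "32768"
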
12 changes: 12 additions & 0 deletions charts/tidb-cluster/values.yaml
@@ -156,6 +156,10 @@ pd:
# Default to false.
hostNetwork: false

# Specify the security context of PD Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}

tikv:
# Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default
# tikv configurations (change to the tags of your tikv version),
@@ -235,6 +239,10 @@ tikv:
# Default to false.
hostNetwork: false

# Specify the security context of TiKV Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}

tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
# tidb configurations (change to the tags of your tidb version),
@@ -293,6 +301,10 @@ tidb:
# Default to false.
hostNetwork: false

# Specify the security context of TiDB Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}

maxFailoverCount: 3
service:
type: NodePort
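
The new podSecurityContext value is the user-facing switch for this feature. A minimal sketch of a values.yaml override that applies TCP keepalive sysctls to TiDB Pods (the same values this PR ships in the AWS default.yaml further down):

tidb:
  podSecurityContext:
    sysctls:
    - name: net.ipv4.tcp_keepalive_time
      value: "300"
    - name: net.ipv4.tcp_keepalive_intvl
      value: "75"

Note that these net.* sysctls fall outside the kubelet's safe set, so they only take effect on nodes whose kubelet is started with --allowed-unsafe-sysctls (the Terraform change below passes "net.*" for the TiDB node group); otherwise the Pods are rejected with SysctlForbidden.
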
2 changes: 1 addition & 1 deletion deploy/aws/main.tf
@@ -3,7 +3,7 @@ provider "aws" {
}

locals {
eks = module.tidb-operator.eks
eks = module.tidb-operator.eks
subnets = module.vpc.private_subnets
}

2 changes: 1 addition & 1 deletion deploy/modules/aws/tidb-cluster/data.tf
@@ -12,7 +12,7 @@ data "aws_ami" "eks_worker" {

data "template_file" "userdata" {
template = file("${path.module}/templates/userdata.sh.tpl")
count = local.worker_group_count
count = length(local.tidb_cluster_worker_groups)

vars = {
cluster_name = var.eks.cluster_id
80 changes: 53 additions & 27 deletions deploy/modules/aws/tidb-cluster/local.tf
@@ -37,48 +37,74 @@ locals {

tidb_cluster_worker_groups = [
{
name = "${var.cluster_name}-pd"
key_name = var.ssh_key_name
instance_type = var.pd_instance_type
root_volume_size = "50"
public_ip = false
kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule --node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true"
name = "${var.cluster_name}-pd"
key_name = var.ssh_key_name
instance_type = var.pd_instance_type
root_volume_size = "50"
public_ip = false
      # joining with a single space is safe even when the extra args are empty or already prefixed by spaces (the same applies to the groups below)
kubelet_extra_args = join(" ",
[
"--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule",
"--node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "pd", var.kubelet_extra_args)
]
)
asg_desired_capacity = var.pd_count
asg_max_size = var.pd_count + 2
# additional_userdata = file("userdata.sh")
},
{
name = "${var.cluster_name}-tikv"
key_name = var.ssh_key_name
instance_type = var.tikv_instance_type
root_volume_size = "50"
public_ip = false
kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule --node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true"
name = "${var.cluster_name}-tikv"
key_name = var.ssh_key_name
instance_type = var.tikv_instance_type
root_volume_size = "50"
public_ip = false
kubelet_extra_args = join(" ",
[
"--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule",
"--node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "tikv", var.kubelet_extra_args)
]
)
asg_desired_capacity = var.tikv_count
asg_max_size = var.tikv_count + 2
pre_userdata = file("${path.module}/pre_userdata")
# additional_userdata = file("userdata.sh")
},
{
name = "${var.cluster_name}-tidb"
key_name = var.ssh_key_name
instance_type = var.tidb_instance_type
root_volume_type = "gp2"
root_volume_size = "50"
public_ip = false
kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule --node-labels=dedicated=${var.cluster_name}-tidb"
name = "${var.cluster_name}-tidb"
key_name = var.ssh_key_name
instance_type = var.tidb_instance_type
root_volume_type = "gp2"
root_volume_size = "50"
public_ip = false
kubelet_extra_args = join(" ",
[
"--allowed-unsafe-sysctls=\\\"net.*\\\"",
"--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule",
"--node-labels=dedicated=${var.cluster_name}-tidb,zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "tidb", var.kubelet_extra_args)
]
)
asg_desired_capacity = var.tidb_count
asg_max_size = var.tidb_count + 2
},
{
name = "${var.cluster_name}-monitor"
key_name = var.ssh_key_name
instance_type = var.monitor_instance_type
root_volume_type = "gp2"
root_volume_size = "50"
public_ip = false
asg_desired_capacity = 1
asg_max_size = 3
name = "${var.cluster_name}-monitor"
key_name = var.ssh_key_name
instance_type = var.monitor_instance_type
root_volume_type = "gp2"
root_volume_size = "50"
public_ip = false
kubelet_extra_args = join(" ",
[
"--node-labels=zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "monitor", var.kubelet_extra_args)
]
)
asg_desired_capacity = 1
asg_max_size = 3
}
]

1 change: 1 addition & 0 deletions deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl
@@ -10,6 +10,7 @@ root hard nofile 1000000
root soft core unlimited
root soft stack 10240
EOF

# config docker ulimit
cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service
sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service
11 changes: 11 additions & 0 deletions deploy/modules/aws/tidb-cluster/values/default.yaml
@@ -14,6 +14,17 @@ tidb:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
separateSlowLog: true
config: |
[log]
level = "info"
[performance]
tcp-keep-alive = true
podSecurityContext:
sysctls:
- name: net.ipv4.tcp_keepalive_time
value: "300"
- name: net.ipv4.tcp_keepalive_intvl
value: "75"
monitor:
storage: 100Gi
storageClassName: ebs-gp2
5 changes: 2 additions & 3 deletions go.mod
@@ -120,9 +120,8 @@ require (
k8s.io/apiserver v0.0.0-20190118115647-a748535592ba
k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968
k8s.io/client-go v2.0.0-alpha.0.0.20190115164855-701b91367003+incompatible
k8s.io/code-generator v0.0.0-20190808180452-d0071a119380
k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a // indirect
k8s.io/klog v0.3.1
k8s.io/code-generator v0.0.0-20190912042602-ebc0eb3a5c23
k8s.io/klog v0.4.0
k8s.io/kubernetes v1.12.5
k8s.io/metrics v0.0.0-20190118124808-33c1aed8dc65 // indirect
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect
24 changes: 15 additions & 9 deletions go.sum
@@ -79,6 +79,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -117,7 +118,7 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8=
@@ -296,6 +297,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
@@ -325,6 +328,8 @@ golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac h1:MQEvx39qSf8vyrx3XRaOe+j1UDIzKwkYOVObRgGPVqI=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
@@ -372,16 +377,16 @@ k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968 h1:VXLj8aMvJEo14Utv+knJDs0
k8s.io/cli-runtime v0.0.0-20190118125240-caee4253d968/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
k8s.io/client-go v2.0.0-alpha.0.0.20190115164855-701b91367003+incompatible h1:Qw/ADzXV2yX+39UUCwNcZmdNS4+sR+V2Jf9NBdZWlQg=
k8s.io/client-go v2.0.0-alpha.0.0.20190115164855-701b91367003+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.0.0-20190808180452-d0071a119380 h1:OoA4SR2r8EBqlBvn5iOKSjaI5Oim4cOvbm92LJZiPZQ=
k8s.io/code-generator v0.0.0-20190808180452-d0071a119380/go.mod h1:yWQ6Ygojs0rLB0sAgl4OcQSi2sM7k20oNWn+7H9w+eA=
k8s.io/code-generator v0.0.0-20190912042602-ebc0eb3a5c23 h1:2oyDSO/D/4/bch5ZhL+sF5CPxO0GMrXhsIKFFOV6/uo=
k8s.io/code-generator v0.0.0-20190912042602-ebc0eb3a5c23/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk=
k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 h1:di3XCwddOR9cWBNpfgXaskhh6cgJuwcK54rvtwUaC10=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ=
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ=
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.12.5 h1:pdQvCJZPGRNVS3CaajKuoPCZKreQaglbRcXwkDwR598=
k8s.io/kubernetes v1.12.5/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.0.0-20190118124808-33c1aed8dc65 h1:0VelqHP6rojigdeX7EfWJ26OCw7PSvCGz7xGg7ukN8U=
@@ -394,5 +399,6 @@ modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787 h1:O69FD9pJA4WUZlEwYatBEEkRWKQ5cKodWpdKTrCS/iQ=
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
38 changes: 18 additions & 20 deletions pkg/apis/pingcap.com/v1alpha1/types.go
@@ -106,24 +106,16 @@ type TidbClusterStatus struct {
// PDSpec contains details of PD member
type PDSpec struct {
ContainerSpec
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
PodAttributesSpec
Replicas int32 `json:"replicas"`
StorageClassName string `json:"storageClassName,omitempty"`
}

// TiDBSpec contains details of TiDB members
type TiDBSpec struct {
ContainerSpec
PodAttributesSpec
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
BinlogEnabled bool `json:"binlogEnabled,omitempty"`
MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"`
@@ -139,14 +131,10 @@ type TiDBSlowLogTailerSpec struct {
// TiKVSpec contains details of TiKV members
type TiKVSpec struct {
ContainerSpec
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
Privileged bool `json:"privileged,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
PodAttributesSpec
Replicas int32 `json:"replicas"`
Privileged bool `json:"privileged,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
}

// TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
@@ -162,6 +150,16 @@ type ContainerSpec struct {
Limits *ResourceRequirement `json:"limits,omitempty"`
}

// PodAttributesSpec is a spec of some general attributes of TiKV, TiDB and PD Pods
type PodAttributesSpec struct {
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"`
}

// Service represent service type used in TidbCluster
type Service struct {
Name string `json:"name,omitempty"`
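
With the embedded PodAttributesSpec, podSecurityContext becomes a per-component field of PDSpec, TiKVSpec and TiDBSpec. A hedged sketch of how that surfaces in a TidbCluster manifest (the cluster name and replica count are illustrative, and other required fields are omitted):

apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: example-cluster
spec:
  tidb:
    replicas: 2
    podSecurityContext:
      sysctls:
      - name: net.ipv4.tcp_keepalive_time
        value: "300"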