Commit

create tidb cluster with cr on aws (#2004)
DanielZhangQD authored Mar 23, 2020
1 parent 856b03a commit d262906
Showing 4 changed files with 226 additions and 31 deletions.
59 changes: 29 additions & 30 deletions deploy/aws/clusters.tf
@@ -17,25 +17,24 @@ provider "helm" {
 }
 
 # TiDB cluster declaration example
-#module "example-cluster" {
-# source = "./tidb-cluster"
-# eks_info = local.default_eks
-# subnets = local.default_subnets
-#
-# # NOTE: cluster_name cannot be changed after creation
-# cluster_name = "demo-cluster"
-# cluster_version = "v3.0.8"
-# ssh_key_name = module.key-pair.key_name
-# pd_count = 1
-# pd_instance_type = "t2.xlarge"
-# tikv_count = 1
-# tikv_instance_type = "t2.xlarge"
-# tidb_count = 1
-# tidb_instance_type = "t2.xlarge"
-# monitor_instance_type = "t2.xlarge"
-# # yaml file that passed to helm to customize the release
-# override_values = file("values/example.yaml")
-#}
+# module example-cluster {
+#   source = "../modules/aws/tidb-cluster"
+
+#   eks = local.eks
+#   subnets = local.subnets
+#   region = var.region
+#   cluster_name = "example"
+
+#   ssh_key_name = module.key-pair.key_name
+#   pd_count = 1
+#   pd_instance_type = "c5.large"
+#   tikv_count = 1
+#   tikv_instance_type = "c5d.large"
+#   tidb_count = 1
+#   tidb_instance_type = "c4.large"
+#   monitor_instance_type = "c5.large"
+#   create_tidb_cluster_release = false
+# }
 
 module "default-cluster" {
   providers = {
@@ -46,15 +45,15 @@ module "default-cluster" {
   subnets = local.subnets
   region = var.region
 
-  cluster_name          = var.default_cluster_name
-  cluster_version       = var.default_cluster_version
-  ssh_key_name          = module.key-pair.key_name
-  pd_count              = var.default_cluster_pd_count
-  pd_instance_type      = var.default_cluster_pd_instance_type
-  tikv_count            = var.default_cluster_tikv_count
-  tikv_instance_type    = var.default_cluster_tikv_instance_type
-  tidb_count            = var.default_cluster_tidb_count
-  tidb_instance_type    = var.default_cluster_tidb_instance_type
-  monitor_instance_type = var.default_cluster_monitor_instance_type
-  override_values       = file("default-cluster.yaml")
+  cluster_name                = var.default_cluster_name
+  cluster_version             = var.default_cluster_version
+  ssh_key_name                = module.key-pair.key_name
+  pd_count                    = var.default_cluster_pd_count
+  pd_instance_type            = var.default_cluster_pd_instance_type
+  tikv_count                  = var.default_cluster_tikv_count
+  tikv_instance_type          = var.default_cluster_tikv_instance_type
+  tidb_count                  = var.default_cluster_tidb_count
+  tidb_instance_type          = var.default_cluster_tidb_instance_type
+  monitor_instance_type       = var.default_cluster_monitor_instance_type
+  create_tidb_cluster_release = var.create_tidb_cluster_release
 }
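
With create_tidb_cluster_release set to false, the module above only provisions the EKS infrastructure; the TiDB cluster and its monitor are then created from the CR manifests added below. A minimal sketch of that workflow, run from deploy/aws after terraform apply, assuming tidb-operator and its CRDs are already installed (the sed/kubectl invocations are illustrative, not part of this diff):

# Fill in the CLUSTER_NAME placeholder in the example manifests;
# "my-cluster" matches the default_cluster_name default.
cp manifests/db.yaml.example db.yaml
cp manifests/db-monitor.yaml.example db-monitor.yaml
sed -i 's/CLUSTER_NAME/my-cluster/g' db.yaml db-monitor.yaml

# Create the TidbCluster and TidbMonitor custom resources. The namespace
# is an assumption; any namespace watched by the operator works.
kubectl create namespace my-cluster
kubectl apply -n my-cluster -f db.yaml
kubectl apply -n my-cluster -f db-monitor.yaml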
84 changes: 84 additions & 0 deletions deploy/aws/manifests/db-monitor.yaml.example
@@ -0,0 +1,84 @@
apiVersion: pingcap.com/v1alpha1
kind: TidbMonitor
metadata:
  name: CLUSTER_NAME
spec:
  alertmanagerURL: ""
  annotations: {}
  clusters:
  - name: CLUSTER_NAME
  grafana:
    baseImage: grafana/grafana
    envs:
      # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
      # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
      GF_AUTH_ANONYMOUS_ENABLED: "true"
      GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
      GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
      # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
      # GF_SERVER_DOMAIN: foo.bar
      # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
    imagePullPolicy: IfNotPresent
    logLevel: info
    password: admin
    resources: {}
    # limits:
    #   cpu: 8000m
    #   memory: 8Gi
    # requests:
    #   cpu: 4000m
    #   memory: 4Gi
    service:
      portName: http-grafana
      type: LoadBalancer
    username: admin
    version: 6.0.1
  imagePullPolicy: IfNotPresent
  initializer:
    baseImage: pingcap/tidb-monitor-initializer
    imagePullPolicy: IfNotPresent
    resources: {}
    # limits:
    #   cpu: 50m
    #   memory: 64Mi
    # requests:
    #   cpu: 50m
    #   memory: 64Mi
    version: v3.0.12
  kubePrometheusURL: ""
  nodeSelector: {}
  persistent: true
  prometheus:
    baseImage: prom/prometheus
    imagePullPolicy: IfNotPresent
    logLevel: info
    reserveDays: 12
    resources: {}
    # limits:
    #   cpu: 8000m
    #   memory: 8Gi
    # requests:
    #   cpu: 4000m
    #   memory: 4Gi
    service:
      portName: http-prometheus
      type: NodePort
    version: v2.11.1
  reloader:
    baseImage: pingcap/tidb-monitor-reloader
    imagePullPolicy: IfNotPresent
    resources: {}
    # limits:
    #   cpu: 50m
    #   memory: 64Mi
    # requests:
    #   cpu: 50m
    #   memory: 64Mi
    service:
      portName: tcp-reloader
      type: NodePort
    version: v1.0.1
  storage: 100Gi
  storageClassName: ebs-gp2
  tolerations: []

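Because the Grafana service above is of type LoadBalancer, the dashboard endpoint can be read off the service once the TidbMonitor is up. A hedged example; the CLUSTER_NAME-grafana service name follows the usual tidb-operator convention and is an assumption here:

# Look up the ELB hostname fronting Grafana (service name assumed
# to be <cluster>-grafana).
kubectl get svc -n my-cluster my-cluster-grafana \
  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
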
108 changes: 108 additions & 0 deletions deploy/aws/manifests/db.yaml.example
@@ -0,0 +1,108 @@
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: CLUSTER_NAME
spec:
  configUpdateStrategy: RollingUpdate
  enableTLSCluster: false
  helper:
    image: busybox:1.31.1
  hostNetwork: false
  imagePullPolicy: IfNotPresent
  pd:
    affinity: {}
    baseImage: pingcap/pd
    config:
      log:
        level: info
      replication:
        location-labels:
        - zone
        max-replicas: 3
    nodeSelector:
      dedicated: CLUSTER_NAME-pd
    podSecurityContext: {}
    replicas: 3
    requests:
      cpu: "1"
      memory: 400Mi
      storage: 1Gi
    storageClassName: ebs-gp2
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: CLUSTER_NAME-pd
  pvReclaimPolicy: Retain
  schedulerName: tidb-scheduler
  tidb:
    affinity: {}
    baseImage: pingcap/tidb
    config:
      log:
        level: info
      performance:
        max-procs: 0
        tcp-keep-alive: true
    enableTLSClient: false
    maxFailoverCount: 3
    nodeSelector:
      dedicated: CLUSTER_NAME-tidb
    podSecurityContext:
      sysctls:
      - name: net.ipv4.tcp_keepalive_time
        value: "300"
      - name: net.ipv4.tcp_keepalive_intvl
        value: "75"
      - name: net.core.somaxconn
        value: "32768"
    replicas: 2
    requests:
      cpu: "1"
      memory: 400Mi
    separateSlowLog: true
    service:
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
        service.beta.kubernetes.io/aws-load-balancer-internal: '0.0.0.0/0'
        service.beta.kubernetes.io/aws-load-balancer-type: nlb
      exposeStatus: true
      externalTrafficPolicy: Local
      type: LoadBalancer
    slowLogTailer:
      limits:
        cpu: 100m
        memory: 50Mi
      requests:
        cpu: 20m
        memory: 5Mi
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: CLUSTER_NAME-tidb
  tikv:
    affinity: {}
    baseImage: pingcap/tikv
    config:
      log-level: info
    hostNetwork: false
    maxFailoverCount: 3
    nodeSelector:
      dedicated: CLUSTER_NAME-tikv
    podSecurityContext: {}
    privileged: false
    replicas: 3
    requests:
      cpu: "1"
      memory: 2Gi
      storage: 45Gi
    storageClassName: local-storage
    tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: CLUSTER_NAME-tikv
  timezone: UTC
  version: v3.0.12

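The dedicated: CLUSTER_NAME-pd/tikv/tidb nodeSelector and toleration pairs in this manifest are meant to line up with the labels and NoSchedule taints that the tidb-cluster Terraform module places on the corresponding node groups. A quick sanity check, with the label key taken from the manifest above and the node-group labeling assumed:

# List the nodes reserved for TiKV; PD and TiDB can be checked the same way.
kubectl get nodes -l dedicated=my-cluster-tikv
# Confirm the matching NoSchedule taint is present.
kubectl describe nodes -l dedicated=my-cluster-tikv | grep Taints
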
6 changes: 5 additions & 1 deletion deploy/aws/variables.tf
@@ -19,7 +19,7 @@ variable "eks_version" {
 
 variable "operator_version" {
   description = "TiDB operator version"
-  default     = "v1.0.6"
+  default     = "v1.1.0"
 }
 
 variable "operator_values" {
@@ -115,3 +115,7 @@ variable "default_cluster_name" {
   default = "my-cluster"
 }
 
+variable "create_tidb_cluster_release" {
+  description = "whether creating tidb-cluster helm release"
+  default     = false
+}
