create node pool for tiflash and cdc (#2413) #2419

Merged · 1 commit · May 11, 2020
8 changes: 7 additions & 1 deletion deploy/aws/clusters.tf
@@ -38,7 +38,7 @@ provider "helm" {

module "default-cluster" {
providers = {
helm = "helm.eks"
helm = helm.eks
}
source = "../modules/aws/tidb-cluster"
eks = local.eks
@@ -56,4 +56,10 @@ module "default-cluster" {
tidb_instance_type = var.default_cluster_tidb_instance_type
monitor_instance_type = var.default_cluster_monitor_instance_type
create_tidb_cluster_release = var.create_tidb_cluster_release
create_tiflash_node_pool = var.create_tiflash_node_pool
create_cdc_node_pool = var.create_cdc_node_pool
tiflash_count = var.cluster_tiflash_count
cdc_count = var.cluster_cdc_count
cdc_instance_type = var.cluster_cdc_instance_type
tiflash_instance_type = var.cluster_tiflash_instance_type
}
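
Both pools are disabled by default, so existing deployments are unaffected. A minimal sketch of how they might be switched on from the root deploy/aws module, assuming a terraform.tfvars file (the file and the values below are illustrative, not part of this PR):

# terraform.tfvars (illustrative) -- enable the optional node pools and
# override the defaults declared in deploy/aws/variables.tf
create_tiflash_node_pool      = true
create_cdc_node_pool          = true
cluster_tiflash_count         = 2
cluster_cdc_count             = 3
cluster_tiflash_instance_type = "i3.4xlarge"
cluster_cdc_instance_type     = "c5.2xlarge"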
26 changes: 26 additions & 0 deletions deploy/aws/variables.tf
@@ -119,3 +119,29 @@ variable "create_tidb_cluster_release" {
description = "whether creating tidb-cluster helm release"
default = false
}

variable "create_tiflash_node_pool" {
description = "whether creating node pool for tiflash"
default = false
}

variable "create_cdc_node_pool" {
description = "whether creating node pool for cdc"
default = false
}

variable "cluster_tiflash_count" {
default = 2
}

variable "cluster_cdc_count" {
default = 3
}

variable "cluster_cdc_instance_type" {
default = "c5.2xlarge"
}

variable "cluster_tiflash_instance_type" {
default = "i3.4xlarge"
}
44 changes: 43 additions & 1 deletion deploy/modules/aws/tidb-cluster/local.tf
@@ -38,9 +38,11 @@ locals {
# 169.254.169.254 is the authoritative AWS metadata server, see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
aws_zone_getter = "$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)"

-  tidb_cluster_worker_groups = [
+  tidb_cluster_worker_groups = [for group in local.tidb_cluster_worker_groups_raw : group if group.enable]
+  tidb_cluster_worker_groups_raw = [
{
name = "${var.cluster_name}-pd"
enable = true
key_name = var.ssh_key_name
instance_type = var.pd_instance_type
root_volume_size = "50"
@@ -59,6 +61,7 @@
},
{
name = "${var.cluster_name}-tikv"
enable = true
key_name = var.ssh_key_name
instance_type = var.tikv_instance_type
root_volume_size = "50"
@@ -78,6 +81,7 @@
},
{
name = "${var.cluster_name}-tidb"
enable = true
key_name = var.ssh_key_name
instance_type = var.tidb_instance_type
root_volume_type = "gp2"
@@ -96,6 +100,7 @@
},
{
name = "${var.cluster_name}-monitor"
enable = true
key_name = var.ssh_key_name
instance_type = var.monitor_instance_type
root_volume_type = "gp2"
@@ -109,6 +114,43 @@
)
asg_desired_capacity = 1
asg_max_size = 3
},
{
name = "${var.cluster_name}-tiflash"
enable = var.create_tiflash_node_pool
key_name = var.ssh_key_name
instance_type = var.tiflash_instance_type
root_volume_size = "50"
public_ip = false
kubelet_extra_args = join(" ",
[
"--register-with-taints=dedicated=${var.cluster_name}-tiflash:NoSchedule",
"--node-labels=dedicated=${var.cluster_name}-tiflash,pingcap.com/aws-local-ssd=true,zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "tiflash", var.kubelet_extra_args)
]
)
asg_desired_capacity = var.tiflash_count
asg_max_size = var.tiflash_count + 2
pre_userdata = file("${path.module}/pre_userdata")
suspended_processes = ["ReplaceUnhealthy"]
},
{
name = "${var.cluster_name}-cdc"
enable = var.create_cdc_node_pool
key_name = var.ssh_key_name
instance_type = var.cdc_instance_type
root_volume_size = "50"
public_ip = false
# the space separator is safe when the extra args is empty or prefixed by spaces (the same hereafter)
kubelet_extra_args = join(" ",
[
"--register-with-taints=dedicated=${var.cluster_name}-cdc:NoSchedule",
"--node-labels=dedicated=${var.cluster_name}-cdc,zone=${local.aws_zone_getter}",
lookup(var.group_kubelet_extra_args, "cdc", var.kubelet_extra_args)
]
)
asg_desired_capacity = var.cdc_count
asg_max_size = var.cdc_count + 2
}
]

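The worker-group list is now built in two steps: a raw list in which every group carries an enable flag, and the real tidb_cluster_worker_groups, which keeps only the enabled entries via a for expression. A standalone sketch of the same pattern, using illustrative names that are not from the module:

variable "create_tiflash_node_pool" {
  default = false
}

locals {
  # Every candidate pool declares whether it should exist at all.
  pools_raw = [
    { name = "pd", enable = true },
    { name = "tiflash", enable = var.create_tiflash_node_pool },
  ]

  # Disabled pools are filtered out before any worker groups are created.
  pools = [for p in local.pools_raw : p if p.enable]
}

output "pool_names" {
  value = [for p in local.pools : p.name]
}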
26 changes: 26 additions & 0 deletions deploy/modules/aws/tidb-cluster/variables.tf
@@ -162,3 +162,29 @@ variable "group_kubelet_extra_args" {
type = map(string)
default = {}
}

variable "create_tiflash_node_pool" {
description = "whether creating node pool for tiflash"
default = false
}

variable "create_cdc_node_pool" {
description = "whether creating node pool for cdc"
default = false
}

variable "tiflash_count" {
default = 2
}

variable "cdc_count" {
default = 3
}

variable "cdc_instance_type" {
default = "c5.2xlarge"
}

variable "tiflash_instance_type" {
default = "i3.4xlarge"
}
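
The new groups resolve their kubelet flags with lookup(var.group_kubelet_extra_args, "tiflash", var.kubelet_extra_args) (and likewise for "cdc"), so a per-pool entry wins and everything else falls back to the shared kubelet_extra_args. A small self-contained sketch of that fallback behaviour; the flag values are assumptions for illustration only:

variable "group_kubelet_extra_args" {
  type = map(string)
  # Assumed example value -- only the tiflash pool gets dedicated flags here.
  default = {
    tiflash = "--eviction-hard=memory.available<1Gi"
  }
}

variable "kubelet_extra_args" {
  default = ""
}

locals {
  # "tiflash" has an entry in the map, so it gets the dedicated flags;
  # "cdc" does not, so it falls back to the shared kubelet_extra_args.
  tiflash_args = lookup(var.group_kubelet_extra_args, "tiflash", var.kubelet_extra_args)
  cdc_args     = lookup(var.group_kubelet_extra_args, "cdc", var.kubelet_extra_args)
}

output "tiflash_args" {
  value = local.tiflash_args
}

output "cdc_args" {
  value = local.cdc_args
}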
14 changes: 7 additions & 7 deletions deploy/modules/aws/tidb-operator/main.tf
@@ -38,19 +38,19 @@ resource "local_file" "kubeconfig" {
}

locals {
kubeconfig = "${var.config_output_path}kubeconfig"
kubeconfig = "${var.config_output_path}kubeconfig"
}

resource "null_resource" "kubeconfig" {
provisioner "local-exec" {
    command = <<EOS
echo "${local_file.kubeconfig.sensitive_content}" > "${local.kubeconfig}"
EOS
}
}

provider "helm" {
alias = "initial"
alias = "initial"
insecure = true
# service_account = "tiller"
install_tiller = false # currently this doesn't work, so we install tiller in the local-exec provisioner. See https://github.com/terraform-providers/terraform-provider-helm/issues/148
@@ -66,7 +66,7 @@ resource "null_resource" "setup-env" {

provisioner "local-exec" {
working_dir = path.cwd
    command = <<EOS
echo "${local_file.kubeconfig.sensitive_content}" > kube_config.yaml
kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/crd.yaml
kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/tiller-rbac.yaml
@@ -86,14 +86,14 @@
}

data "helm_repository" "pingcap" {
provider = "helm.initial"
depends_on = ["null_resource.setup-env"]
provider = helm.initial
depends_on = [null_resource.setup-env]
name = "pingcap"
url = "http://charts.pingcap.org/"
}

resource "helm_release" "tidb-operator" {
provider = "helm.initial"
provider = helm.initial
depends_on = [null_resource.setup-env, local_file.kubeconfig]

repository = data.helm_repository.pingcap.name
Expand Down