From bd2343f6d2c2a2a6bc5a701affec179a47d380dc Mon Sep 17 00:00:00 2001 From: tennix Date: Mon, 17 Jun 2019 20:05:26 +0800 Subject: [PATCH 01/11] separate worker node group from master control node group --- deploy/aws/.gitignore | 1 + deploy/aws/aws-key-pair/main.tf | 43 ++ deploy/aws/aws-key-pair/outputs.tf | 20 + deploy/aws/aws-key-pair/variables.tf | 8 + deploy/aws/aws-key-pair/versions.tf | 4 + deploy/aws/aws-tutorial.tfvars | 11 - deploy/aws/charts/tidb-cluster | 1 - deploy/aws/charts/tidb-operator | 1 - deploy/aws/clusters.tf | 69 +++ deploy/aws/data.tf | 54 +-- deploy/aws/eks/aws_auth.tf | 127 ++++++ deploy/aws/eks/cluster.tf | 113 +++++ deploy/aws/eks/data.tf | 154 +++++++ deploy/aws/eks/kubectl.tf | 6 + deploy/aws/eks/local.tf | 252 +++++++++++ deploy/aws/eks/manifests/crd.yaml | 103 +++++ .../{ => eks}/manifests/gp2-storageclass.yaml | 0 .../manifests/local-volume-provisioner.yaml | 0 deploy/aws/eks/manifests/tiller-rbac.yaml | 18 + deploy/aws/eks/outputs.tf | 98 +++++ .../config-map-aws-auth-map_accounts.yaml.tpl | 1 + .../config-map-aws-auth-map_roles.yaml.tpl | 4 + .../config-map-aws-auth-map_users.yaml.tpl | 4 + .../templates/config-map-aws-auth.yaml.tpl | 13 + deploy/aws/eks/templates/kubeconfig.tpl | 28 ++ deploy/aws/eks/templates/userdata.sh.tpl | 10 + deploy/aws/eks/templates/worker-role.tpl | 5 + deploy/aws/eks/variables.tf | 262 +++++++++++ deploy/aws/eks/workers.tf | 381 ++++++++++++++++ deploy/aws/eks/workers_launch_template.tf | 342 +++++++++++++++ deploy/aws/main.tf | 243 ++--------- deploy/aws/manifests/crd.yaml | 1 - deploy/aws/manifests/tiller-rbac.yaml | 1 - deploy/aws/outputs.tf | 56 +-- .../templates/tidb-cluster-values.yaml.tpl | 408 ------------------ deploy/aws/tidb-cluster/cluster.tf | 86 ++++ deploy/aws/tidb-cluster/data.tf | 83 ++++ deploy/aws/tidb-cluster/local.tf | 273 ++++++++++++ deploy/aws/tidb-cluster/outputs.tf | 7 + .../tidb-cluster/templates/userdata.sh.tpl | 24 ++ deploy/aws/tidb-cluster/values/default.yaml | 9 + deploy/aws/tidb-cluster/variables.tf | 244 +++++++++++ deploy/aws/tidb-cluster/workers.tf | 190 ++++++++ .../tidb-cluster/workers_launch_template.tf | 342 +++++++++++++++ deploy/aws/variables.tf | 102 ++--- 45 files changed, 3412 insertions(+), 790 deletions(-) create mode 100644 deploy/aws/aws-key-pair/main.tf create mode 100644 deploy/aws/aws-key-pair/outputs.tf create mode 100644 deploy/aws/aws-key-pair/variables.tf create mode 100644 deploy/aws/aws-key-pair/versions.tf delete mode 100644 deploy/aws/aws-tutorial.tfvars delete mode 120000 deploy/aws/charts/tidb-cluster delete mode 120000 deploy/aws/charts/tidb-operator create mode 100644 deploy/aws/clusters.tf create mode 100644 deploy/aws/eks/aws_auth.tf create mode 100644 deploy/aws/eks/cluster.tf create mode 100644 deploy/aws/eks/data.tf create mode 100644 deploy/aws/eks/kubectl.tf create mode 100644 deploy/aws/eks/local.tf create mode 100644 deploy/aws/eks/manifests/crd.yaml rename deploy/aws/{ => eks}/manifests/gp2-storageclass.yaml (100%) rename deploy/aws/{ => eks}/manifests/local-volume-provisioner.yaml (100%) create mode 100644 deploy/aws/eks/manifests/tiller-rbac.yaml create mode 100644 deploy/aws/eks/outputs.tf create mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl create mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl create mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl create mode 100644 deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl create mode 100644 
deploy/aws/eks/templates/kubeconfig.tpl create mode 100644 deploy/aws/eks/templates/userdata.sh.tpl create mode 100644 deploy/aws/eks/templates/worker-role.tpl create mode 100644 deploy/aws/eks/variables.tf create mode 100644 deploy/aws/eks/workers.tf create mode 100644 deploy/aws/eks/workers_launch_template.tf delete mode 120000 deploy/aws/manifests/crd.yaml delete mode 120000 deploy/aws/manifests/tiller-rbac.yaml delete mode 100644 deploy/aws/templates/tidb-cluster-values.yaml.tpl create mode 100644 deploy/aws/tidb-cluster/cluster.tf create mode 100644 deploy/aws/tidb-cluster/data.tf create mode 100644 deploy/aws/tidb-cluster/local.tf create mode 100644 deploy/aws/tidb-cluster/outputs.tf create mode 100644 deploy/aws/tidb-cluster/templates/userdata.sh.tpl create mode 100644 deploy/aws/tidb-cluster/values/default.yaml create mode 100644 deploy/aws/tidb-cluster/variables.tf create mode 100644 deploy/aws/tidb-cluster/workers.tf create mode 100644 deploy/aws/tidb-cluster/workers_launch_template.tf diff --git a/deploy/aws/.gitignore b/deploy/aws/.gitignore index 2fa90f5444..e460c42302 100644 --- a/deploy/aws/.gitignore +++ b/deploy/aws/.gitignore @@ -3,3 +3,4 @@ credentials/ terraform.tfstate terraform.tfstate.backup .terraform.tfstate.lock.info +kubeconfig_*.yaml \ No newline at end of file diff --git a/deploy/aws/aws-key-pair/main.tf b/deploy/aws/aws-key-pair/main.tf new file mode 100644 index 0000000000..875f0a7d22 --- /dev/null +++ b/deploy/aws/aws-key-pair/main.tf @@ -0,0 +1,43 @@ +locals { + public_key_filename = "${var.path}/${var.name}.pub" + private_key_filename = "${var.path}/${var.name}.pem" +} + +resource "tls_private_key" "generated" { + algorithm = "RSA" +} + +resource "aws_key_pair" "generated" { + key_name = var.name + public_key = tls_private_key.generated.public_key_openssh + + lifecycle { + ignore_changes = [key_name] + } +} + +resource "local_file" "public_key_openssh" { + count = var.path != "" ? 1 : 0 + content = tls_private_key.generated.public_key_openssh + filename = local.public_key_filename +} + +resource "local_file" "private_key_pem" { + count = var.path != "" ? 1 : 0 + content = tls_private_key.generated.private_key_pem + filename = local.private_key_filename +} + +resource "null_resource" "chmod" { + count = var.path != "" ? 1 : 0 + depends_on = [local_file.private_key_pem] + + triggers = { + key = tls_private_key.generated.private_key_pem + } + + provisioner "local-exec" { + command = "chmod 600 ${local.private_key_filename}" + } +} + diff --git a/deploy/aws/aws-key-pair/outputs.tf b/deploy/aws/aws-key-pair/outputs.tf new file mode 100644 index 0000000000..da32317b7e --- /dev/null +++ b/deploy/aws/aws-key-pair/outputs.tf @@ -0,0 +1,20 @@ +output "key_name" { + value = aws_key_pair.generated.key_name +} + +output "public_key_openssh" { + value = tls_private_key.generated.public_key_openssh +} + +output "private_key_pem" { + value = tls_private_key.generated.private_key_pem +} + +output "public_key_filepath" { + value = local.public_key_filename +} + +output "private_key_filepath" { + value = local.private_key_filename +} + diff --git a/deploy/aws/aws-key-pair/variables.tf b/deploy/aws/aws-key-pair/variables.tf new file mode 100644 index 0000000000..6f392c3440 --- /dev/null +++ b/deploy/aws/aws-key-pair/variables.tf @@ -0,0 +1,8 @@ +variable "name" { + description = "Unique name for the key, should also be a valid filename. This will prefix the public/private key." 
+} + +variable "path" { + description = "Path to a directory where the public and private key will be stored." + default = "" +} diff --git a/deploy/aws/aws-key-pair/versions.tf b/deploy/aws/aws-key-pair/versions.tf new file mode 100644 index 0000000000..ac97c6ac8e --- /dev/null +++ b/deploy/aws/aws-key-pair/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/deploy/aws/aws-tutorial.tfvars b/deploy/aws/aws-tutorial.tfvars deleted file mode 100644 index beba5bdc9c..0000000000 --- a/deploy/aws/aws-tutorial.tfvars +++ /dev/null @@ -1,11 +0,0 @@ -pd_instance_type = "c5d.large" -tikv_instance_type = "c5d.large" -tidb_instance_type = "c4.large" -monitor_instance_type = "c5.large" - -pd_count = 1 -tikv_count = 1 -tidb_count = 1 - -cluster_name = "aws_tutorial" -tikv_root_volume_size = "50" \ No newline at end of file diff --git a/deploy/aws/charts/tidb-cluster b/deploy/aws/charts/tidb-cluster deleted file mode 120000 index 326d382104..0000000000 --- a/deploy/aws/charts/tidb-cluster +++ /dev/null @@ -1 +0,0 @@ -../../../charts/tidb-cluster \ No newline at end of file diff --git a/deploy/aws/charts/tidb-operator b/deploy/aws/charts/tidb-operator deleted file mode 120000 index a45f172da2..0000000000 --- a/deploy/aws/charts/tidb-operator +++ /dev/null @@ -1 +0,0 @@ -../../../charts/tidb-operator \ No newline at end of file diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf new file mode 100644 index 0000000000..5c56a766a4 --- /dev/null +++ b/deploy/aws/clusters.tf @@ -0,0 +1,69 @@ +module "demo-cluster" { + source = "./tidb-cluster" + eks_info = module.eks.eks_info + subnets = split( + ",", + var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), + ) + + cluster_name = "demo-cluster" + cluster_version = "v3.0.0-rc.2" + ssh_key_name = module.key-pair.key_name + pd_count = 1 + pd_instance_type = "t2.xlarge" + tikv_count = 1 + tikv_instance_type = "t2.xlarge" + tidb_count = 1 + tidb_instance_type = "t2.xlarge" + monitor_instance_type = "t2.xlarge" + monitor_storage_size = "100Gi" + monitor_enable_anonymous_user = true + override_values = "values/default.yaml" +} + +module "test-cluster" { + source = "./tidb-cluster" + eks_info = module.eks.eks_info + subnets = split( + ",", + var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), + ) + + cluster_name = "test-cluster" + cluster_version = "v3.0.0-rc.1" + ssh_key_name = module.key-pair.key_name + pd_count = 1 + pd_instance_type = "t2.xlarge" + tikv_count = 1 + tikv_instance_type = "t2.xlarge" + tidb_count = 1 + tidb_instance_type = "t2.xlarge" + monitor_instance_type = "t2.xlarge" + monitor_storage_size = "100Gi" + monitor_enable_anonymous_user = true + override_values = "values/default.yaml" +} + +module "prod-cluster" { + source = "./tidb-cluster" + eks_info = module.eks.eks_info + subnets = ["subnet-0043bd7c0ce42020b"] + # subnets = split( + # ",", + # var.create_vpc ? 
join(",", module.vpc.private_subnets) : join(",", var.subnets), + # ) + + cluster_name = "prod-cluster" + cluster_version = "v3.0.0-rc.1" + ssh_key_name = module.key-pair.key_name + pd_count = 1 + pd_instance_type = "t2.xlarge" + tikv_count = 3 + tikv_instance_type = "t2.xlarge" + tidb_count = 1 + tidb_instance_type = "t2.xlarge" + monitor_instance_type = "t2.xlarge" + monitor_storage_size = "100Gi" + monitor_enable_anonymous_user = true + override_values = "values/default.yaml" +} diff --git a/deploy/aws/data.tf b/deploy/aws/data.tf index 22924f5d8b..054c461d3a 100644 --- a/deploy/aws/data.tf +++ b/deploy/aws/data.tf @@ -1,53 +1,13 @@ -data "aws_availability_zones" "available" {} +data "aws_availability_zones" "available" { +} data "aws_ami" "amazon-linux-2" { - most_recent = true - - owners = ["amazon"] + most_recent = true - filter { - name = "name" - values = ["amzn2-ami-hvm-*-x86_64-gp2"] - } -} + owners = ["amazon"] -data "template_file" "tidb_cluster_values" { - template = "${file("${path.module}/templates/tidb-cluster-values.yaml.tpl")}" - vars { - cluster_version = "${var.tidb_version}" - pd_replicas = "${var.pd_count}" - tikv_replicas = "${var.tikv_count}" - tidb_replicas = "${var.tidb_count}" - monitor_enable_anonymous_user = "${var.monitor_enable_anonymous_user}" + filter { + name = "name" + values = ["amzn2-ami-hvm-*-x86_64-gp2"] } } - -# kubernetes provider can't use computed config_path right now, see issue: -# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/142 -# so we don't use kubernetes provider to retrieve tidb and monitor connection info, -# instead we use external data source. -# data "kubernetes_service" "tidb" { -# depends_on = ["helm_release.tidb-cluster"] -# metadata { -# name = "tidb-cluster-tidb" -# namespace = "tidb" -# } -# } - -# data "kubernetes_service" "monitor" { -# depends_on = ["helm_release.tidb-cluster"] -# metadata { -# name = "tidb-cluster-grafana" -# namespace = "tidb" -# } -# } - -data "external" "tidb_service" { - depends_on = ["null_resource.wait-tidb-ready"] - program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-tidb -ojson | jq '.status.loadBalancer.ingress[0]'"] -} - -data "external" "monitor_service" { - depends_on = ["null_resource.wait-tidb-ready"] - program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-grafana -ojson | jq '.status.loadBalancer.ingress[0]'"] -} diff --git a/deploy/aws/eks/aws_auth.tf b/deploy/aws/eks/aws_auth.tf new file mode 100644 index 0000000000..6fcb643955 --- /dev/null +++ b/deploy/aws/eks/aws_auth.tf @@ -0,0 +1,127 @@ +resource "local_file" "config_map_aws_auth" { + content = data.template_file.config_map_aws_auth.rendered + filename = "${var.config_output_path}config-map-aws-auth_${var.cluster_name}.yaml" + count = var.write_aws_auth_config ? 
1 : 0 +} + +resource "null_resource" "update_config_map_aws_auth" { + depends_on = [aws_eks_cluster.this] + + provisioner "local-exec" { + working_dir = path.module + + command = < kube_config.yaml + echo "${null_resource.update_config_map_aws_auth[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml + kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && break || sleep 10 +done +kubectl apply -f manifests/crd.yaml +kubectl apply -f manifests/local-volume-provisioner.yaml +kubectl apply -f manifests/gp2-storageclass.yaml +kubectl apply -f manifests/tiller-rbac.yaml +helm init --service-account tiller --upgrade --wait +until helm ls; do + echo "Wait tiller ready" +done +helm repo add pingcap http://charts.pingcap.org/ +helm upgrade --install tidb-operator pingcap/tidb-operator --version=${var.operator_version} --namespace=tidb-admin --wait +helm version +rm aws_auth_configmap.yaml kube_config.yaml +EOT + + interpreter = var.local_exec_interpreter + environment = { + KUBECONFIG = "kube_config.yaml" + } + } + + triggers = { + # timestamp = timestamp() + kube_config_map_rendered = data.template_file.kubeconfig.rendered + config_map_rendered = data.template_file.config_map_aws_auth.rendered + endpoint = aws_eks_cluster.this.endpoint + } + + count = var.manage_aws_auth ? 1 : 0 +} + +data "aws_caller_identity" "current" { +} + +data "template_file" "launch_template_worker_role_arns" { + count = var.worker_group_launch_template_count + template = file("${path.module}/templates/worker-role.tpl") + + vars = { + worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element( + aws_iam_instance_profile.workers_launch_template.*.role, + count.index, + )}" + } +} + +data "template_file" "worker_role_arns" { + count = var.worker_group_count + template = file("${path.module}/templates/worker-role.tpl") + + vars = { + worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(aws_iam_instance_profile.workers.*.role, count.index)}" + } +} + +data "template_file" "config_map_aws_auth" { + template = file("${path.module}/templates/config-map-aws-auth.yaml.tpl") + + vars = { + worker_role_arn = join( + "", + distinct( + concat( + data.template_file.launch_template_worker_role_arns.*.rendered, + data.template_file.worker_role_arns.*.rendered, + ), + ), + ) + map_users = join("", data.template_file.map_users.*.rendered) + map_roles = join("", data.template_file.map_roles.*.rendered) + map_accounts = join("", data.template_file.map_accounts.*.rendered) + } +} + +data "template_file" "map_users" { + count = var.map_users_count + template = file( + "${path.module}/templates/config-map-aws-auth-map_users.yaml.tpl", + ) + + vars = { + user_arn = var.map_users[count.index]["user_arn"] + username = var.map_users[count.index]["username"] + group = var.map_users[count.index]["group"] + } +} + +data "template_file" "map_roles" { + count = var.map_roles_count + template = file( + "${path.module}/templates/config-map-aws-auth-map_roles.yaml.tpl", + ) + + vars = { + role_arn = var.map_roles[count.index]["role_arn"] + username = var.map_roles[count.index]["username"] + group = var.map_roles[count.index]["group"] + } +} + +data "template_file" "map_accounts" { + count = var.map_accounts_count + template = file( + "${path.module}/templates/config-map-aws-auth-map_accounts.yaml.tpl", + ) + + vars = { + account_number = element(var.map_accounts, count.index) + } +} diff --git a/deploy/aws/eks/cluster.tf b/deploy/aws/eks/cluster.tf new 
file mode 100644 index 0000000000..90e84c4064 --- /dev/null +++ b/deploy/aws/eks/cluster.tf @@ -0,0 +1,113 @@ +resource "aws_eks_cluster" "this" { + name = var.cluster_name + role_arn = aws_iam_role.cluster.arn + version = var.cluster_version + + vpc_config { + # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to + # force an interpolation expression to be interpreted as a list by wrapping it + # in an extra set of list brackets. That form was supported for compatibilty in + # v0.11, but is no longer supported in Terraform v0.12. + # + # If the expression in the following list itself returns a list, remove the + # brackets to avoid interpretation as a list of lists. If the expression + # returns a single list item then leave it as-is and remove this TODO comment. + security_group_ids = [local.cluster_security_group_id] + subnet_ids = var.subnets + endpoint_private_access = var.cluster_endpoint_private_access + endpoint_public_access = var.cluster_endpoint_public_access + } + + timeouts { + create = var.cluster_create_timeout + delete = var.cluster_delete_timeout + } + + depends_on = [ + aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy, + aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy, + ] +} + +resource "aws_security_group" "cluster" { + name_prefix = var.cluster_name + description = "EKS cluster security group." + vpc_id = var.vpc_id + tags = merge( + var.tags, + { + "Name" = "${var.cluster_name}-eks_cluster_sg" + }, + ) + count = var.cluster_create_security_group ? 1 : 0 +} + +resource "aws_security_group_rule" "cluster_egress_internet" { + description = "Allow cluster egress access to the Internet." + protocol = "-1" + security_group_id = aws_security_group.cluster[0].id + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + to_port = 0 + type = "egress" + count = var.cluster_create_security_group ? 1 : 0 +} + +resource "aws_security_group_rule" "cluster_https_worker_ingress" { + description = "Allow pods to communicate with the EKS cluster API." + protocol = "tcp" + security_group_id = aws_security_group.cluster[0].id + source_security_group_id = local.worker_security_group_id + from_port = 443 + to_port = 443 + type = "ingress" + count = var.cluster_create_security_group ? 1 : 0 +} + +resource "aws_iam_role" "cluster" { + name_prefix = var.cluster_name + assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json + permissions_boundary = var.permissions_boundary + path = var.iam_path + force_detach_policies = true +} + +resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" + role = aws_iam_role.cluster.name +} + +resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" + role = aws_iam_role.cluster.name +} + +# resource "null_resource" "setup-env" { +# depends_on = [aws_eks_cluster.this] + +# # Always execute +# # triggers = { +# # deploy_timestamp = timestamp() +# # } + +# provisioner "local-exec" { +# working_dir = path.module +# command = < 0 ? " - ${join( + "\n - ", + var.kubeconfig_aws_authenticator_command_args, + )}" : " - ${join( + "\n - ", + formatlist("\"%s\"", ["token", "-i", aws_eks_cluster.this.name]), + )}" + aws_authenticator_additional_args = length(var.kubeconfig_aws_authenticator_additional_args) > 0 ? 
" - ${join( + "\n - ", + var.kubeconfig_aws_authenticator_additional_args, + )}" : "" + aws_authenticator_env_variables = length(var.kubeconfig_aws_authenticator_env_variables) > 0 ? " env:\n${join( + "\n", + data.template_file.aws_authenticator_env_variables.*.rendered, + )}" : "" + } +} + +data "template_file" "aws_authenticator_env_variables" { + template = <. + name: tidbclusters.pingcap.com +spec: + # group name to use for REST API: /apis// + group: pingcap.com + # list of versions supported by this CustomResourceDefinition + version: v1alpha1 + # either Namespaced or Cluster + scope: Namespaced + names: + # plural name to be used in the URL: /apis/// + plural: tidbclusters + # singular name to be used as an alias on the CLI and for display + singular: tidbcluster + # kind is normally the CamelCased singular type. Your resource manifests use this. + kind: TidbCluster + # shortNames allow shorter string to match your resource on the CLI + shortNames: + - tc + additionalPrinterColumns: + - name: PD + type: string + description: The image for PD cluster + JSONPath: .spec.pd.image + - name: Storage + type: string + description: The storage size specified for PD node + JSONPath: .spec.pd.requests.storage + - name: Ready + type: integer + description: The ready replicas number of PD cluster + JSONPath: .status.pd.statefulSet.readyReplicas + - name: Desire + type: integer + description: The desired replicas number of PD cluster + JSONPath: .spec.pd.replicas + - name: TiKV + type: string + description: The image for TiKV cluster + JSONPath: .spec.tikv.image + - name: Storage + type: string + description: The storage size specified for TiKV node + JSONPath: .spec.tikv.requests.storage + - name: Ready + type: integer + description: The ready replicas number of TiKV cluster + JSONPath: .status.tikv.statefulSet.readyReplicas + - name: Desire + type: integer + description: The desired replicas number of TiKV cluster + JSONPath: .spec.tikv.replicas + - name: TiDB + type: string + description: The image for TiDB cluster + JSONPath: .spec.tidb.image + - name: Ready + type: integer + description: The ready replicas number of TiDB cluster + JSONPath: .status.tidb.statefulSet.readyReplicas + - name: Desire + type: integer + description: The desired replicas number of TiDB cluster + JSONPath: .spec.tidb.replicas + validation: + # openAPIV3Schema is the schema for validating custom objects. 
+ openAPIV3Schema: + properties: + spec: + properties: + pd: + properties: + limits: + properties: + cpu: + type: string + requests: + properties: + cpu: + type: string + tikv: + properties: + limits: + properties: + cpu: + type: string + requests: + properties: + cpu: + type: string + tidb: + properties: + limits: + properties: + cpu: + type: string + requests: + properties: + cpu: + type: string diff --git a/deploy/aws/manifests/gp2-storageclass.yaml b/deploy/aws/eks/manifests/gp2-storageclass.yaml similarity index 100% rename from deploy/aws/manifests/gp2-storageclass.yaml rename to deploy/aws/eks/manifests/gp2-storageclass.yaml diff --git a/deploy/aws/manifests/local-volume-provisioner.yaml b/deploy/aws/eks/manifests/local-volume-provisioner.yaml similarity index 100% rename from deploy/aws/manifests/local-volume-provisioner.yaml rename to deploy/aws/eks/manifests/local-volume-provisioner.yaml diff --git a/deploy/aws/eks/manifests/tiller-rbac.yaml b/deploy/aws/eks/manifests/tiller-rbac.yaml new file mode 100644 index 0000000000..d3ed1ad46e --- /dev/null +++ b/deploy/aws/eks/manifests/tiller-rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tiller + namespace: kube-system +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: tiller-clusterrolebinding +subjects: +- kind: ServiceAccount + name: tiller + namespace: kube-system +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: "" diff --git a/deploy/aws/eks/outputs.tf b/deploy/aws/eks/outputs.tf new file mode 100644 index 0000000000..25d655cf26 --- /dev/null +++ b/deploy/aws/eks/outputs.tf @@ -0,0 +1,98 @@ +output "cluster_id" { + description = "The name/id of the EKS cluster." + value = aws_eks_cluster.this.id +} + +# Though documented, not yet supported +# output "cluster_arn" { +# description = "The Amazon Resource Name (ARN) of the cluster." +# value = "${aws_eks_cluster.this.arn}" +# } + +output "cluster_certificate_authority_data" { + description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster." + value = aws_eks_cluster.this.certificate_authority[0].data +} + +output "cluster_endpoint" { + description = "The endpoint for your EKS Kubernetes API." + value = aws_eks_cluster.this.endpoint +} + +output "cluster_version" { + description = "The Kubernetes server version for the EKS cluster." + value = aws_eks_cluster.this.version +} + +output "cluster_security_group_id" { + description = "Security group ID attached to the EKS cluster." + value = local.cluster_security_group_id +} + +output "config_map_aws_auth" { + description = "A kubernetes configuration to authenticate to this EKS cluster." + value = data.template_file.config_map_aws_auth.rendered +} + +output "cluster_iam_role_name" { + description = "IAM role name of the EKS cluster." + value = aws_iam_role.cluster.name +} + +output "cluster_iam_role_arn" { + description = "IAM role ARN of the EKS cluster." + value = aws_iam_role.cluster.arn +} + +output "kubeconfig" { + description = "kubectl config file contents for this EKS cluster." + value = data.template_file.kubeconfig.rendered +} + +output "kubeconfig_filename" { + description = "The filename of the generated kubectl config." + value = element(concat(local_file.kubeconfig.*.filename, [""]), 0) +} + +output "workers_asg_arns" { + description = "IDs of the autoscaling groups containing workers." 
+ value = concat( + aws_autoscaling_group.workers.*.arn, + aws_autoscaling_group.workers_launch_template.*.arn, + ) +} + +output "workers_asg_names" { + description = "Names of the autoscaling groups containing workers." + value = concat( + aws_autoscaling_group.workers.*.id, + aws_autoscaling_group.workers_launch_template.*.id, + ) +} + +output "worker_security_group_id" { + description = "Security group ID attached to the EKS workers." + value = local.worker_security_group_id +} + +output "worker_iam_role_name" { + description = "default IAM role name for EKS worker groups" + value = aws_iam_role.workers.name +} + +output "worker_iam_role_arn" { + description = "default IAM role ARN for EKS worker groups" + value = aws_iam_role.workers.arn +} + +output "worker_iam_role" { + value = aws_iam_role.workers +} + +output "worker_iam_instance_profile" { +value = aws_iam_instance_profile.workers +} + +output "eks_info" { + value = local.eks_info +} diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl new file mode 100644 index 0000000000..26dc5078f4 --- /dev/null +++ b/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl @@ -0,0 +1 @@ + - "${account_number}" diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl new file mode 100644 index 0000000000..9f321b7be6 --- /dev/null +++ b/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl @@ -0,0 +1,4 @@ + - rolearn: ${role_arn} + username: ${username} + groups: + - ${group} diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl new file mode 100644 index 0000000000..92499de41c --- /dev/null +++ b/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl @@ -0,0 +1,4 @@ + - userarn: ${user_arn} + username: ${username} + groups: + - ${group} diff --git a/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl new file mode 100644 index 0000000000..86f4f5f998 --- /dev/null +++ b/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: aws-auth + namespace: kube-system +data: + mapRoles: | +${worker_role_arn} +${map_roles} + mapUsers: | +${map_users} + mapAccounts: | +${map_accounts} diff --git a/deploy/aws/eks/templates/kubeconfig.tpl b/deploy/aws/eks/templates/kubeconfig.tpl new file mode 100644 index 0000000000..1696391e89 --- /dev/null +++ b/deploy/aws/eks/templates/kubeconfig.tpl @@ -0,0 +1,28 @@ +apiVersion: v1 +preferences: {} +kind: Config + +clusters: +- cluster: + server: ${endpoint} + certificate-authority-data: ${cluster_auth_base64} + name: ${kubeconfig_name} + +contexts: +- context: + cluster: ${kubeconfig_name} + user: ${kubeconfig_name} + name: ${kubeconfig_name} + +current-context: ${kubeconfig_name} + +users: +- name: ${kubeconfig_name} + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: ${aws_authenticator_command} + args: +${aws_authenticator_command_args} +${aws_authenticator_additional_args} +${aws_authenticator_env_variables} diff --git a/deploy/aws/eks/templates/userdata.sh.tpl b/deploy/aws/eks/templates/userdata.sh.tpl new file mode 100644 index 0000000000..ba8ea2800b --- /dev/null +++ b/deploy/aws/eks/templates/userdata.sh.tpl @@ -0,0 +1,10 @@ +#!/bin/bash -xe + +# Allow user supplied 
pre userdata code +${pre_userdata} + +# Bootstrap and join the cluster +/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}' + +# Allow user supplied userdata code +${additional_userdata} diff --git a/deploy/aws/eks/templates/worker-role.tpl b/deploy/aws/eks/templates/worker-role.tpl new file mode 100644 index 0000000000..2a9af5863f --- /dev/null +++ b/deploy/aws/eks/templates/worker-role.tpl @@ -0,0 +1,5 @@ + - rolearn: ${worker_role_arn} + username: system:node:{{EC2PrivateDNSName}} + groups: + - system:bootstrappers + - system:nodes diff --git a/deploy/aws/eks/variables.tf b/deploy/aws/eks/variables.tf new file mode 100644 index 0000000000..a4aa09340f --- /dev/null +++ b/deploy/aws/eks/variables.tf @@ -0,0 +1,262 @@ +variable "cluster_name" { + description = "Name of the EKS cluster. Also used as a prefix in names of related resources." +} + +variable "cluster_security_group_id" { + description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers and provide API access to your current IP/32." + default = "" +} + +variable "cluster_version" { + description = "Kubernetes version to use for the EKS cluster." + default = "1.12" +} + +variable "operator_version" { + description = "TiDB Operator version" + default = "v1.0.0-beta.3" +} + +variable "config_output_path" { + description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Should end in a forward slash `/` ." + default = "./" +} + +variable "write_kubeconfig" { + description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `config_output_path`." + default = true +} + +variable "manage_aws_auth" { + description = "Whether to apply the aws-auth configmap file." + default = true +} + +variable "write_aws_auth_config" { + description = "Whether to write the aws-auth configmap file." + default = true +} + +variable "map_accounts" { + description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." + type = list(string) + default = [] +} + +variable "map_accounts_count" { + description = "The count of accounts in the map_accounts list." + type = string + default = 0 +} + +variable "map_roles" { + description = "Additional IAM roles to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." + type = list(map(string)) + default = [] +} + +variable "map_roles_count" { + description = "The count of roles in the map_roles list." + type = string + default = 0 +} + +variable "map_users" { + description = "Additional IAM users to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." + type = list(map(string)) + default = [] +} + +variable "map_users_count" { + description = "The count of users in the map_users list." + type = string + default = 0 +} + +variable "subnets" { + description = "A list of subnets to place the EKS cluster and workers within." + type = list(string) +} + +variable "tags" { + description = "A map of tags to add to all resources." + type = map(string) + default = {} +} + +variable "vpc_id" { + description = "VPC where the cluster and workers will be deployed."
+} + +variable "worker_groups" { + description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys." + type = list(map(string)) + + default = [ + { + "name" = "default" + }, + ] +} + +variable "worker_group_count" { + description = "The number of maps contained within the worker_groups list." + type = string + default = "1" +} + +variable "workers_group_defaults" { + description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." + type = map(string) + default = {} +} + +variable "worker_group_tags" { + description = "A map defining extra tags to be applied to the worker group ASG." + type = map(list(string)) + + default = { + default = [] + } +} + +variable "worker_groups_launch_template" { + description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys." + type = list(map(string)) + + default = [ + { + "name" = "default" + }, + ] +} + +variable "worker_group_launch_template_count" { + description = "The number of maps contained within the worker_groups_launch_template list." + type = string + default = "0" +} + +variable "workers_group_launch_template_defaults" { + description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." + type = map(string) + default = {} +} + +variable "worker_group_launch_template_tags" { + description = "A map defining extra tags to be applied to the worker group template ASG." + type = map(list(string)) + + default = { + default = [] + } +} + +variable "worker_security_group_id" { + description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster." + default = "" +} + +variable "worker_ami_name_filter" { + description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. \"v20190220\"" + default = "v*" +} + +variable "worker_additional_security_group_ids" { + description = "A list of additional security group ids to attach to worker instances" + type = list(string) + default = [] +} + +variable "worker_sg_ingress_from_port" { + description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)." + default = "1025" +} + +variable "workers_additional_policies" { + description = "Additional policies to be added to workers" + type = list(string) + default = [] +} + +variable "workers_additional_policies_count" { + default = 0 +} + +variable "kubeconfig_aws_authenticator_command" { + description = "Command to use to fetch AWS EKS credentials." + default = "aws-iam-authenticator" +} + +variable "kubeconfig_aws_authenticator_command_args" { + description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]." + type = list(string) + default = [] +} + +variable "kubeconfig_aws_authenticator_additional_args" { + description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]." 
+ type = list(string) + default = [] +} + +variable "kubeconfig_aws_authenticator_env_variables" { + description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}." + type = map(string) + default = {} +} + +variable "kubeconfig_name" { + description = "Override the default name used for items kubeconfig." + default = "" +} + +variable "cluster_create_timeout" { + description = "Timeout value when creating the EKS cluster." + default = "15m" +} + +variable "cluster_delete_timeout" { + description = "Timeout value when deleting the EKS cluster." + default = "15m" +} + +variable "local_exec_interpreter" { + description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows Git Bash is a good choice." + type = list(string) + default = ["/bin/sh", "-c"] +} + +variable "cluster_create_security_group" { + description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`." + default = true +} + +variable "worker_create_security_group" { + description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`." + default = true +} + +variable "permissions_boundary" { + description = "If provided, all IAM roles will be created with this permissions boundary attached." + default = "" +} + +variable "iam_path" { + description = "If provided, all IAM roles will be created on this path." + default = "/" +} + +variable "cluster_endpoint_private_access" { + description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled." + default = false +} + +variable "cluster_endpoint_public_access" { + description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled." 
+ default = true +} + +variable "ssh_key_name" { + type = string +} diff --git a/deploy/aws/eks/workers.tf b/deploy/aws/eks/workers.tf new file mode 100644 index 0000000000..93c1279db0 --- /dev/null +++ b/deploy/aws/eks/workers.tf @@ -0,0 +1,381 @@ +# Worker Groups using Launch Configurations + +resource "aws_autoscaling_group" "workers" { + name_prefix = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}" + desired_capacity = lookup( + local.control_worker_groups[count.index], + "asg_desired_capacity", + local.workers_group_defaults["asg_desired_capacity"], + ) + max_size = lookup( + local.control_worker_groups[count.index], + "asg_max_size", + local.workers_group_defaults["asg_max_size"], + ) + min_size = lookup( + local.control_worker_groups[count.index], + "asg_min_size", + local.workers_group_defaults["asg_min_size"], + ) + force_delete = lookup( + local.control_worker_groups[count.index], + "asg_force_delete", + local.workers_group_defaults["asg_force_delete"], + ) + # target_group_arns = compact( + # split( + # ",", + # coalesce( + # lookup(local.control_worker_groups[count.index], "target_group_arns", ""), + # local.workers_group_defaults["target_group_arns"], + # ), + # ), + # ) + launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) + vpc_zone_identifier = split( + ",", + coalesce( + lookup(local.control_worker_groups[count.index], "subnets", ""), + local.workers_group_defaults["subnets"], + ), + ) + protect_from_scale_in = lookup( + local.control_worker_groups[count.index], + "protect_from_scale_in", + local.workers_group_defaults["protect_from_scale_in"], + ) + # suspended_processes = compact( + # split( + # ",", + # coalesce( + # lookup(local.control_worker_groups[count.index], "suspended_processes", ""), + # local.workers_group_defaults["suspended_processes"], + # ), + # ), + # ) + # enabled_metrics = compact( + # split( + # ",", + # coalesce( + # lookup(local.control_worker_groups[count.index], "enabled_metrics", ""), + # local.workers_group_defaults["enabled_metrics"], + # ), + # ), + # ) + count = var.worker_group_count + placement_group = lookup( + local.control_worker_groups[count.index], + "placement_group", + local.workers_group_defaults["placement_group"], + ) + + tags = concat( + [ + { + "key" = "Name" + "value" = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}-eks_asg" + "propagate_at_launch" = true + }, + { + "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + "value" = "owned" + "propagate_at_launch" = true + }, + { + "key" = "k8s.io/cluster-autoscaler/${lookup( + local.control_worker_groups[count.index], + "autoscaling_enabled", + local.workers_group_defaults["autoscaling_enabled"], + ) == 1 ? "enabled" : "disabled"}" + "value" = "true" + "propagate_at_launch" = false + }, + # { + # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" + # "value" = "" + # "propagate_at_launch" = false + # }, + { + "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + "value" = "${lookup( + local.control_worker_groups[count.index], + "root_volume_size", + local.workers_group_defaults["root_volume_size"], + )}Gi" + "propagate_at_launch" = false + }, + ], + local.asg_tags, + var.worker_group_tags[contains( + keys(var.worker_group_tags), + lookup(local.control_worker_groups[count.index], "name", count.index), + ) ? 
lookup(local.control_worker_groups[count.index], "name", count.index) : "default"], + ) + + + lifecycle { + create_before_destroy = true + # ignore_changes = ["desired_capacity"] + } +} + +resource "aws_launch_configuration" "workers" { + name_prefix = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}" + associate_public_ip_address = lookup( + local.control_worker_groups[count.index], + "public_ip", + local.workers_group_defaults["public_ip"], + ) + security_groups = concat([local.worker_security_group_id], var.worker_additional_security_group_ids, compact( + split( + ",", + lookup( + local.control_worker_groups[count.index], + "additional_security_group_ids", + local.workers_group_defaults["additional_security_group_ids"], + ), + ), + )) + iam_instance_profile = element(aws_iam_instance_profile.workers.*.id, count.index) + image_id = lookup( + local.control_worker_groups[count.index], + "ami_id", + local.workers_group_defaults["ami_id"], + ) + instance_type = lookup( + local.control_worker_groups[count.index], + "instance_type", + local.workers_group_defaults["instance_type"], + ) + key_name = lookup( + local.control_worker_groups[count.index], + "key_name", + local.workers_group_defaults["key_name"], + ) + user_data_base64 = base64encode(element(data.template_file.userdata.*.rendered, count.index)) + ebs_optimized = lookup( + local.control_worker_groups[count.index], + "ebs_optimized", + lookup( + local.ebs_optimized, + lookup( + local.control_worker_groups[count.index], + "instance_type", + local.workers_group_defaults["instance_type"], + ), + false, + ), + ) + enable_monitoring = lookup( + local.control_worker_groups[count.index], + "enable_monitoring", + local.workers_group_defaults["enable_monitoring"], + ) + spot_price = lookup( + local.control_worker_groups[count.index], + "spot_price", + local.workers_group_defaults["spot_price"], + ) + placement_tenancy = lookup( + local.control_worker_groups[count.index], + "placement_tenancy", + local.workers_group_defaults["placement_tenancy"], + ) + count = var.worker_group_count + + lifecycle { + create_before_destroy = true + } + + root_block_device { + volume_size = lookup( + local.control_worker_groups[count.index], + "root_volume_size", + local.workers_group_defaults["root_volume_size"], + ) + volume_type = lookup( + local.control_worker_groups[count.index], + "root_volume_type", + local.workers_group_defaults["root_volume_type"], + ) + iops = lookup( + local.control_worker_groups[count.index], + "root_iops", + local.workers_group_defaults["root_iops"], + ) + delete_on_termination = true + } +} + +resource "aws_security_group" "workers" { + name_prefix = aws_eks_cluster.this.name + description = "Security group for all nodes in the cluster." + vpc_id = var.vpc_id + count = var.worker_create_security_group ? 1 : 0 + tags = merge( + var.tags, + { + "Name" = "${aws_eks_cluster.this.name}-eks_worker_sg" + "kubernetes.io/cluster/${aws_eks_cluster.this.name}" = "owned" + }, + ) +} + +resource "aws_security_group_rule" "workers_egress_internet" { + description = "Allow nodes all egress to the Internet." + protocol = "-1" + security_group_id = aws_security_group.workers[0].id + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + to_port = 0 + type = "egress" + count = var.worker_create_security_group ? 1 : 0 +} + +resource "aws_security_group_rule" "workers_ingress_self" { + description = "Allow node to communicate with each other." 
+ protocol = "-1" + security_group_id = aws_security_group.workers[0].id + source_security_group_id = aws_security_group.workers[0].id + from_port = 0 + to_port = 65535 + type = "ingress" + count = var.worker_create_security_group ? 1 : 0 +} + +resource "aws_security_group_rule" "workers_ingress_cluster" { + description = "Allow workers pods to receive communication from the cluster control plane." + protocol = "tcp" + security_group_id = aws_security_group.workers[0].id + source_security_group_id = local.cluster_security_group_id + from_port = var.worker_sg_ingress_from_port + to_port = 65535 + type = "ingress" + count = var.worker_create_security_group ? 1 : 0 +} + +resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { + description = "Allow workers Kubelets to receive communication from the cluster control plane." + protocol = "tcp" + security_group_id = aws_security_group.workers[0].id + source_security_group_id = local.cluster_security_group_id + from_port = 10250 + to_port = 10250 + type = "ingress" + count = var.worker_create_security_group ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 +} + +resource "aws_security_group_rule" "workers_ingress_cluster_https" { + description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane." + protocol = "tcp" + security_group_id = aws_security_group.workers[0].id + source_security_group_id = local.cluster_security_group_id + from_port = 443 + to_port = 443 + type = "ingress" + count = var.worker_create_security_group ? 1 : 0 +} + +resource "aws_iam_role" "workers" { + name_prefix = aws_eks_cluster.this.name + assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json + permissions_boundary = var.permissions_boundary + path = var.iam_path + force_detach_policies = true +} + +resource "aws_iam_instance_profile" "workers" { + name_prefix = aws_eks_cluster.this.name + role = lookup( + local.control_worker_groups[count.index], + "iam_role_id", + local.workers_group_defaults["iam_role_id"], + ) + count = var.worker_group_count + path = var.iam_path +} + +resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" + role = aws_iam_role.workers.name +} + +resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" + role = aws_iam_role.workers.name +} + +resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" { + policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + role = aws_iam_role.workers.name +} + +resource "aws_iam_role_policy_attachment" "workers_additional_policies" { + count = var.workers_additional_policies_count + role = aws_iam_role.workers.name + policy_arn = var.workers_additional_policies[count.index] +} + +resource "null_resource" "tags_as_list_of_maps" { + count = length(keys(var.tags)) + + triggers = { + key = element(keys(var.tags), count.index) + value = element(values(var.tags), count.index) + propagate_at_launch = "true" + } +} + +resource "aws_iam_role_policy_attachment" "workers_autoscaling" { + policy_arn = aws_iam_policy.worker_autoscaling.arn + role = aws_iam_role.workers.name +} + +resource "aws_iam_policy" "worker_autoscaling" { + name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}" + description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}" + policy = 
data.aws_iam_policy_document.worker_autoscaling.json + path = var.iam_path +} + +data "aws_iam_policy_document" "worker_autoscaling" { + statement { + sid = "eksWorkerAutoscalingAll" + effect = "Allow" + + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeLaunchTemplateVersions", + ] + + resources = ["*"] + } + + statement { + sid = "eksWorkerAutoscalingOwn" + effect = "Allow" + + actions = [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + ] + + resources = ["*"] + + condition { + test = "StringEquals" + variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled" + values = ["true"] + } + } +} diff --git a/deploy/aws/eks/workers_launch_template.tf b/deploy/aws/eks/workers_launch_template.tf new file mode 100644 index 0000000000..ec35958816 --- /dev/null +++ b/deploy/aws/eks/workers_launch_template.tf @@ -0,0 +1,342 @@ +# Worker Groups using Launch Templates + +resource "aws_autoscaling_group" "workers_launch_template" { + name_prefix = "${aws_eks_cluster.this.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}" + desired_capacity = lookup( + var.worker_groups_launch_template[count.index], + "asg_desired_capacity", + local.workers_group_launch_template_defaults["asg_desired_capacity"], + ) + max_size = lookup( + var.worker_groups_launch_template[count.index], + "asg_max_size", + local.workers_group_launch_template_defaults["asg_max_size"], + ) + min_size = lookup( + var.worker_groups_launch_template[count.index], + "asg_min_size", + local.workers_group_launch_template_defaults["asg_min_size"], + ) + force_delete = lookup( + var.worker_groups_launch_template[count.index], + "asg_force_delete", + local.workers_group_launch_template_defaults["asg_force_delete"], + ) + # target_group_arns = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "target_group_arns", + # "", + # ), + # local.workers_group_launch_template_defaults["target_group_arns"], + # ), + # ), + # ) + + mixed_instances_policy { + instances_distribution { + on_demand_allocation_strategy = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_allocation_strategy", + local.workers_group_launch_template_defaults["on_demand_allocation_strategy"], + ) + on_demand_base_capacity = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_base_capacity", + local.workers_group_launch_template_defaults["on_demand_base_capacity"], + ) + on_demand_percentage_above_base_capacity = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_percentage_above_base_capacity", + local.workers_group_launch_template_defaults["on_demand_percentage_above_base_capacity"], + ) + spot_allocation_strategy = lookup( + var.worker_groups_launch_template[count.index], + "spot_allocation_strategy", + local.workers_group_launch_template_defaults["spot_allocation_strategy"], + ) + spot_instance_pools = lookup( + var.worker_groups_launch_template[count.index], + "spot_instance_pools", + local.workers_group_launch_template_defaults["spot_instance_pools"], + ) + spot_max_price = lookup( + var.worker_groups_launch_template[count.index], + 
"spot_max_price", + local.workers_group_launch_template_defaults["spot_max_price"], + ) + } + + launch_template { + launch_template_specification { + launch_template_id = element( + aws_launch_template.workers_launch_template.*.id, + count.index, + ) + version = "$Latest" + } + + override { + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ) + } + + override { + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "override_instance_type", + local.workers_group_launch_template_defaults["override_instance_type"], + ) + } + } + } + + vpc_zone_identifier = split( + ",", + coalesce( + lookup( + var.worker_groups_launch_template[count.index], + "subnets", + "", + ), + local.workers_group_launch_template_defaults["subnets"], + ), + ) + protect_from_scale_in = lookup( + var.worker_groups_launch_template[count.index], + "protect_from_scale_in", + local.workers_group_launch_template_defaults["protect_from_scale_in"], + ) + # suspended_processes = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "suspended_processes", + # "", + # ), + # local.workers_group_launch_template_defaults["suspended_processes"], + # ), + # ), + # ) + # enabled_metrics = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "enabled_metrics", + # "", + # ), + # local.workers_group_launch_template_defaults["enabled_metrics"], + # ), + # ), + # ) + count = var.worker_group_launch_template_count + + tags = concat( + [ + { + "key" = "Name" + "value" = "${aws_eks_cluster.this.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}-eks_asg" + "propagate_at_launch" = true + }, + { + "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + "value" = "owned" + "propagate_at_launch" = true + }, + { + "key" = "k8s.io/cluster-autoscaler/${lookup( + var.worker_groups_launch_template[count.index], + "autoscaling_enabled", + local.workers_group_launch_template_defaults["autoscaling_enabled"], + ) == 1 ? "enabled" : "disabled"}" + "value" = "true" + "propagate_at_launch" = false + }, + # { + # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" + # "value" = "" + # "propagate_at_launch" = false + # }, + { + "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + "value" = "${lookup( + var.worker_groups_launch_template[count.index], + "root_volume_size", + local.workers_group_launch_template_defaults["root_volume_size"], + )}Gi" + "propagate_at_launch" = false + }, + ], + local.asg_tags, + var.worker_group_launch_template_tags[contains( + keys(var.worker_group_launch_template_tags), + lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + ), + ) ? 
lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + ) : "default"], + ) + + lifecycle { + create_before_destroy = true + + ignore_changes = [desired_capacity] + } +} + +resource "aws_launch_template" "workers_launch_template" { + name_prefix = "${aws_eks_cluster.this.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}" + + network_interfaces { + associate_public_ip_address = lookup( + var.worker_groups_launch_template[count.index], + "public_ip", + local.workers_group_launch_template_defaults["public_ip"], + ) + security_groups = concat([local.worker_security_group_id], var.worker_additional_security_group_ids, compact( + split( + ",", + lookup( + var.worker_groups_launch_template[count.index], + "additional_security_group_ids", + local.workers_group_launch_template_defaults["additional_security_group_ids"], + ), + ), + )) + } + + iam_instance_profile { + name = element( + aws_iam_instance_profile.workers_launch_template.*.name, + count.index, + ) + } + + image_id = lookup( + var.worker_groups_launch_template[count.index], + "ami_id", + local.workers_group_launch_template_defaults["ami_id"], + ) + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ) + key_name = lookup( + var.worker_groups_launch_template[count.index], + "key_name", + local.workers_group_launch_template_defaults["key_name"], + ) + user_data = base64encode( + element( + data.template_file.launch_template_userdata.*.rendered, + count.index, + ), + ) + ebs_optimized = lookup( + var.worker_groups_launch_template[count.index], + "ebs_optimized", + lookup( + local.ebs_optimized, + lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ), + false, + ), + ) + + monitoring { + enabled = lookup( + var.worker_groups_launch_template[count.index], + "enable_monitoring", + local.workers_group_launch_template_defaults["enable_monitoring"], + ) + } + + placement { + tenancy = lookup( + var.worker_groups_launch_template[count.index], + "placement_tenancy", + local.workers_group_launch_template_defaults["placement_tenancy"], + ) + } + + count = var.worker_group_launch_template_count + + lifecycle { + create_before_destroy = true + } + + block_device_mappings { + device_name = data.aws_ami.eks_worker.root_device_name + + ebs { + volume_size = lookup( + var.worker_groups_launch_template[count.index], + "root_volume_size", + local.workers_group_launch_template_defaults["root_volume_size"], + ) + volume_type = lookup( + var.worker_groups_launch_template[count.index], + "root_volume_type", + local.workers_group_launch_template_defaults["root_volume_type"], + ) + iops = lookup( + var.worker_groups_launch_template[count.index], + "root_iops", + local.workers_group_launch_template_defaults["root_iops"], + ) + encrypted = lookup( + var.worker_groups_launch_template[count.index], + "root_encrypted", + local.workers_group_launch_template_defaults["root_encrypted"], + ) + kms_key_id = lookup( + var.worker_groups_launch_template[count.index], + "kms_key_id", + local.workers_group_launch_template_defaults["kms_key_id"], + ) + delete_on_termination = true + } + } +} + +resource "aws_iam_instance_profile" "workers_launch_template" { + name_prefix = aws_eks_cluster.this.name + role = lookup( + var.worker_groups_launch_template[count.index], + "iam_role_id", + 
local.workers_group_launch_template_defaults["iam_role_id"], + ) + count = var.worker_group_launch_template_count + path = var.iam_path +} diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index f5687f5b04..af85dc7e25 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -1,242 +1,57 @@ provider "aws" { - region = "${var.region}" + region = var.region } -module "key-pair" { - source = "cloudposse/key-pair/aws" - version = "0.3.2" - - name = "${var.cluster_name}" - namespace = "k8s" - stage = "prod" - ssh_public_key_path = "${path.module}/credentials/" - generate_ssh_key = "true" - private_key_extension = ".pem" - chmod_command = "chmod 600 %v" +locals { + kubeconfig = "${path.module}/credentials/kubeconfig-${var.eks_name}" } -resource "aws_security_group" "ssh" { - name = "${var.cluster_name}" - description = "Allow SSH access for bastion instance" - vpc_id = "${var.create_vpc ? module.vpc.vpc_id : var.vpc_id}" - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = "${var.ingress_cidr}" - } - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } +module "key-pair" { + source = "./aws-key-pair" + name = var.eks_name + path = "${path.module}/credentials/" } module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "1.60.0" - name = "${var.cluster_name}" - cidr = "${var.vpc_cidr}" - create_vpc = "${var.create_vpc}" - azs = ["${data.aws_availability_zones.available.names[0]}", "${data.aws_availability_zones.available.names[1]}", "${data.aws_availability_zones.available.names[2]}"] - private_subnets = "${var.private_subnets}" - public_subnets = "${var.public_subnets}" + + version = "2.6.0" + name = var.eks_name + cidr = var.vpc_cidr + create_vpc = var.create_vpc + # azs = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2]] + azs = data.aws_availability_zones.available.names + private_subnets = var.private_subnets + public_subnets = var.public_subnets enable_nat_gateway = true single_nat_gateway = true # The following tags are required for ELB private_subnet_tags = { - "kubernetes.io/cluster/${var.cluster_name}" = "shared" + "kubernetes.io/cluster/${var.eks_name}" = "shared" } public_subnet_tags = { - "kubernetes.io/cluster/${var.cluster_name}" = "shared" + "kubernetes.io/cluster/${var.eks_name}" = "shared" } vpc_tags = { - "kubernetes.io/cluster/${var.cluster_name}" = "shared" - } -} - -module "ec2" { - source = "terraform-aws-modules/ec2-instance/aws" - version = "1.21.0" - name = "${var.cluster_name}-bastion" - instance_count = "${var.create_bastion ? 1:0}" - ami = "${data.aws_ami.amazon-linux-2.id}" - instance_type = "${var.bastion_instance_type}" - key_name = "${module.key-pair.key_name}" - associate_public_ip_address = true - monitoring = false - user_data = "${file("bastion-userdata")}" - vpc_security_group_ids = ["${aws_security_group.ssh.id}"] - subnet_ids = "${split(",", var.create_vpc ? 
join(",", module.vpc.public_subnets) : join(",", var.subnets))}" - - tags = { - app = "tidb" + "kubernetes.io/cluster/${var.eks_name}" = "shared" } } module "eks" { - # source = "terraform-aws-modules/eks/aws" - # version = "2.3.1" - # We can not use cluster autoscaler for pod with local PV due to the limitations listed here: - # https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#i-have-a-couple-of-pending-pods-but-there-was-no-scale-up - # so we scale out by updating auto-scaling-group desired_capacity directly via the patched version of aws eks module - source = "github.com/tennix/terraform-aws-eks?ref=v2.3.1-patch" - cluster_name = "${var.cluster_name}" - cluster_version = "${var.k8s_version}" + source = "./eks" + cluster_name = var.eks_name + cluster_version = var.eks_version + operator_version = var.operator_version + ssh_key_name = module.key-pair.key_name config_output_path = "credentials/" - subnets = "${split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets))}" - vpc_id = "${var.create_vpc ? module.vpc.vpc_id : var.vpc_id}" - - # instance types: https://aws.amazon.com/ec2/instance-types/ - # instance prices: https://aws.amazon.com/ec2/pricing/on-demand/ - - worker_groups = [ - { - # pd - name = "pd_worker_group" - key_name = "${module.key-pair.key_name}" - # WARNING: if you change instance type, you must also modify the corresponding disk mounting in pd-userdata.sh script - # instance_type = "c5d.xlarge" # 4c, 8G, 100G NVMe SSD - instance_type = "${var.pd_instance_type}" # m5d.xlarge 4c, 16G, 150G NVMe SSD - root_volume_size = "50" # rest NVMe disk for PD data - public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=pd:NoSchedule --node-labels=dedicated=pd" - asg_desired_capacity = "${var.pd_count}" - asg_max_size = "${var.pd_count + 2}" - additional_userdata = "${file("userdata.sh")}" - }, - { # tikv - name = "tikv_worker_group" - key_name = "${module.key-pair.key_name}" - # WARNING: if you change instance type, you must also modify the corresponding disk mounting in tikv-userdata.sh script - instance_type = "${var.tikv_instance_type}" # i3.2xlarge 8c, 61G, 1.9T NVMe SSD - root_volume_type = "gp2" - root_volume_size = "100" - public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=tikv:NoSchedule --node-labels=dedicated=tikv" - asg_desired_capacity = "${var.tikv_count}" - asg_max_size = "${var.tikv_count + 2}" - additional_userdata = "${file("userdata.sh")}" - }, - { # tidb - name = "tidb_worker_group" - key_name = "${module.key-pair.key_name}" - instance_type = "${var.tidb_instance_type}" # c4.4xlarge 16c, 30G - root_volume_type = "gp2" - root_volume_size = "100" - public_ip = false - kubelet_extra_args = "--register-with-taints=dedicated=tidb:NoSchedule --node-labels=dedicated=tidb" - asg_desired_capacity = "${var.tidb_count}" - asg_max_size = "${var.tidb_count + 2}" - }, - { # monitor - name = "monitor_worker_group" - key_name = "${module.key-pair.key_name}" - instance_type = "${var.monitor_instance_type}" # c5.xlarge 4c, 8G - root_volume_type = "gp2" - root_volume_size = "100" - public_ip = false - asg_desired_capacity = 1 - asg_max_size = 3 - } - ] - - worker_group_count = "4" + subnets = split( + ",", + var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), + ) + vpc_id = var.create_vpc ? 
module.vpc.vpc_id : var.vpc_id tags = { app = "tidb" } } - -# kubernetes and helm providers rely on EKS, but terraform provider doesn't support depends_on -# follow this link https://github.com/hashicorp/terraform/issues/2430#issuecomment-370685911 -# we have the following hack -resource "local_file" "kubeconfig" { - # HACK: depends_on for the helm and kubernetes provider - # Passing provider configuration value via a local_file - depends_on = ["module.eks"] - sensitive_content = "${module.eks.kubeconfig}" - filename = "${path.module}/credentials/kubeconfig_${var.cluster_name}" -} - -# kubernetes provider can't use computed config_path right now, see issue: -# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/142 -# so we don't use kubernetes provider to retrieve tidb and monitor connection info, -# instead we use external data source. -# provider "kubernetes" { -# config_path = "${local_file.kubeconfig.filename}" -# } - -provider "helm" { - insecure = true - # service_account = "tiller" - # install_tiller = true # currently this doesn't work, so we install tiller in the local-exec provisioner. See https://github.com/terraform-providers/terraform-provider-helm/issues/148 - kubernetes { - config_path = "${local_file.kubeconfig.filename}" - } -} - -resource "null_resource" "setup-env" { - depends_on = ["module.eks"] - - provisioner "local-exec" { - working_dir = "${path.module}" - command = < 8, default thread pool size for coprocessors - # will be set to tikv.resources.limits.cpu * 0.8. - # readpoolCoprocessorConcurrency: 8 - - # scheduler's worker pool size, should increase it in heavy write cases, - # also should less than total cpu cores. - # storageSchedulerWorkerPoolSize: 4 - -tidb: - replicas: ${tidb_replicas} - # The secret name of root password, you can create secret with following command: - # kubectl create secret generic tidb-secret --from-literal=root= --namespace= - # If unset, the root password will be empty and you can set it after connecting - # passwordSecretName: tidb-secret - # initSql is the SQL statements executed after the TiDB cluster is bootstrapped. - # initSql: |- - # create database app; - image: "pingcap/tidb:${cluster_version}" - # Image pull policy. - imagePullPolicy: IfNotPresent - logLevel: info - preparedPlanCacheEnabled: false - preparedPlanCacheCapacity: 100 - # Enable local latches for transactions. Enable it when - # there are lots of conflicts between transactions. - txnLocalLatchesEnabled: false - txnLocalLatchesCapacity: "10240000" - # The limit of concurrent executed sessions. - tokenLimit: "1000" - # Set the memory quota for a query in bytes. Default: 32GB - memQuotaQuery: "34359738368" - # The limitation of the number for the entries in one transaction. - # If using TiKV as the storage, the entry represents a key/value pair. - # WARNING: Do not set the value too large, otherwise it will make a very large impact on the TiKV cluster. - # Please adjust this configuration carefully. - txnEntryCountLimit: "300000" - # The limitation of the size in byte for the entries in one transaction. - # If using TiKV as the storage, the entry represents a key/value pair. - # WARNING: Do not set the value too large, otherwise it will make a very large impact on the TiKV cluster. - # Please adjust this configuration carefully. - txnTotalSizeLimit: "104857600" - # enableBatchDml enables batch commit for the DMLs - enableBatchDml: false - # check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. 
- checkMb4ValueInUtf8: true - # treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. - treatOldVersionUtf8AsUtf8mb4: true - # lease is schema lease duration, very dangerous to change only if you know what you do. - lease: 45s - # Max CPUs to use, 0 use number of CPUs in the machine. - maxProcs: 0 - resources: - limits: {} - # cpu: 16000m - # memory: 16Gi - requests: {} - # cpu: 12000m - # memory: 12Gi - nodeSelector: - dedicated: tidb - # kind: tidb - # zone: cn-bj1-01,cn-bj1-02 - # region: cn-bj1 - tolerations: - - key: dedicated - operator: Equal - value: tidb - effect: "NoSchedule" - maxFailoverCount: 3 - service: - type: LoadBalancer - exposeStatus: true - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 - service.beta.kubernetes.io/aws-load-balancer-type: nlb - # separateSlowLog: true - slowLogTailer: - image: busybox:1.26.2 - resources: - limits: - cpu: 100m - memory: 50Mi - requests: - cpu: 20m - memory: 5Mi - - # tidb plugin configuration - plugin: - # enable plugin or not - enable: false - # the start argument to specify the folder containing - directory: /plugins - # the start argument to specify the plugin id (name "-" version) that needs to be loaded, e.g. 'conn_limit-1'. - list: ["whitelist-1"] - -# mysqlClient is used to set password for TiDB -# it must has Python MySQL client installed -mysqlClient: - image: tnir/mysqlclient - imagePullPolicy: IfNotPresent - -monitor: - create: true - # Also see rbac.create - # If you set rbac.create to false, you need to provide a value here. - # If you set rbac.create to true, you should leave this empty. - # serviceAccount: - persistent: true - storageClassName: ebs-gp2 - storage: 500Gi - grafana: - create: true - image: grafana/grafana:6.0.1 - imagePullPolicy: IfNotPresent - logLevel: info - resources: - limits: {} - # cpu: 8000m - # memory: 8Gi - requests: {} - # cpu: 4000m - # memory: 4Gi - username: admin - password: admin - config: - # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD - # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables - GF_AUTH_ANONYMOUS_ENABLED: %{ if monitor_enable_anonymous_user }"true"%{ else }"false"%{ endif } - GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org." - GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" - # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana - # GF_SERVER_DOMAIN: foo.bar - # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/" - service: - type: LoadBalancer - prometheus: - image: prom/prometheus:v2.2.1 - imagePullPolicy: IfNotPresent - logLevel: info - resources: - limits: {} - # cpu: 8000m - # memory: 8Gi - requests: {} - # cpu: 4000m - # memory: 4Gi - service: - type: NodePort - reserveDays: 12 - # alertmanagerURL: "" - nodeSelector: {} - # kind: monitor - # zone: cn-bj1-01,cn-bj1-02 - # region: cn-bj1 - tolerations: [] - # - key: node-role - # operator: Equal - # value: tidb - # effect: "NoSchedule" - -binlog: - pump: - create: false - replicas: 1 - image: "pingcap/tidb-binlog:${cluster_version}" - imagePullPolicy: IfNotPresent - logLevel: info - # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. - # different classes might map to quality-of-service levels, or to backup policies, - # or to arbitrary policies determined by the cluster administrators. 
- # refer to https://kubernetes.io/docs/concepts/storage/storage-classes - storageClassName: local-storage - storage: 10Gi - syncLog: true - # a integer value to control expiry date of the binlog data, indicates for how long (in days) the binlog data would be stored. - # must bigger than 0 - gc: 7 - # number of seconds between heartbeat ticks (in 2 seconds) - heartbeatInterval: 2 - - drainer: - create: false - image: "pingcap/tidb-binlog:${cluster_version}" - imagePullPolicy: IfNotPresent - logLevel: info - # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. - # different classes might map to quality-of-service levels, or to backup policies, - # or to arbitrary policies determined by the cluster administrators. - # refer to https://kubernetes.io/docs/concepts/storage/storage-classes - storageClassName: local-storage - storage: 10Gi - # parallel worker count (default 16) - workerCount: 16 - # the interval time (in seconds) of detect pumps' status (default 10) - detectInterval: 10 - # disbale detect causality - disableDetect: false - # disable dispatching sqls that in one same binlog; if set true, work-count and txn-batch would be useless - disableDispatch: false - # # disable sync these schema - ignoreSchemas: "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test" - # if drainer donesn't have checkpoint, use initial commitTS to initial checkpoint - initialCommitTs: 0 - # enable safe mode to make syncer reentrant - safeMode: false - # number of binlog events in a transaction batch (default 20) - txnBatch: 20 - # downstream storage, equal to --dest-db-type - # valid values are "mysql", "pb", "kafka" - destDBType: pb - mysql: {} - # host: "127.0.0.1" - # user: "root" - # password: "" - # port: 3306 - # # Time and size limits for flash batch write - # timeLimit: "30s" - # sizeLimit: "100000" - kafka: {} - # only need config one of zookeeper-addrs and kafka-addrs, will get kafka address if zookeeper-addrs is configed. - # zookeeperAddrs: "127.0.0.1:2181" - # kafkaAddrs: "127.0.0.1:9092" - # kafkaVersion: "0.8.2.0" - -scheduledBackup: - create: false - binlogImage: "pingcap/tidb-binlog:${cluster_version}" - binlogImagePullPolicy: IfNotPresent - # https://github.com/tennix/tidb-cloud-backup - mydumperImage: pingcap/tidb-cloud-backup:latest - mydumperImagePullPolicy: IfNotPresent - # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. - # different classes might map to quality-of-service levels, or to backup policies, - # or to arbitrary policies determined by the cluster administrators. 
- # refer to https://kubernetes.io/docs/concepts/storage/storage-classes - storageClassName: local-storage - storage: 100Gi - # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule - schedule: "0 0 * * *" - # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#suspend - suspend: false - # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#jobs-history-limits - successfulJobsHistoryLimit: 3 - failedJobsHistoryLimit: 1 - # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#starting-deadline - startingDeadlineSeconds: 3600 - # https://github.com/maxbube/mydumper/blob/master/docs/mydumper_usage.rst#options - options: "--chunk-filesize=100" - # secretName is the name of the secret which stores user and password used for backup - # Note: you must give the user enough privilege to do the backup - # you can create the secret by: - # kubectl create secret generic backup-secret --from-literal=user=root --from-literal=password= - secretName: backup-secret - # backup to gcp - gcp: {} - # bucket: "" - # secretName is the name of the secret which stores the gcp service account credentials json file - # The service account must have read/write permission to the above bucket. - # Read the following document to create the service account and download the credentials file as credentials.json: - # https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually - # And then create the secret by: kubectl create secret generic gcp-backup-secret --from-file=./credentials.json - # secretName: gcp-backup-secret - - # backup to ceph object storage - ceph: {} - # endpoint: "" - # bucket: "" - # secretName is the name of the secret which stores ceph object store access key and secret key - # You can create the secret by: - # kubectl create secret generic ceph-backup-secret --from-literal=access_key= --from-literal=secret_key= - # secretName: ceph-backup-secret - -metaInstance: "{{ $labels.instance }}" -metaType: "{{ $labels.type }}" -metaValue: "{{ $value }}" diff --git a/deploy/aws/tidb-cluster/cluster.tf b/deploy/aws/tidb-cluster/cluster.tf new file mode 100644 index 0000000000..1a53b8a90a --- /dev/null +++ b/deploy/aws/tidb-cluster/cluster.tf @@ -0,0 +1,86 @@ +resource "local_file" "kubeconfig" { + content = var.eks_info.kubeconfig + filename = "${path.module}/kubeconfig_${var.cluster_name}.yaml" +} + +resource "null_resource" "deploy-cluster" { + depends_on = [local_file.kubeconfig] + + provisioner "local-exec" { + working_dir = path.module + + command = < /etc/security/limits.d/99-tidb.conf +root soft nofile 1000000 +root hard nofile 1000000 +root soft core unlimited +root soft stack 10240 +EOF +# config docker ulimit +cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service +sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service +sed -i 's/LimitNPROC=infinity/LimitNPROC=1048576/' /etc/systemd/system/docker.service +systemctl daemon-reload +systemctl restart docker + +# Bootstrap and join the cluster +/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}' + +# Allow user supplied userdata code +${additional_userdata} diff --git a/deploy/aws/tidb-cluster/values/default.yaml b/deploy/aws/tidb-cluster/values/default.yaml new file mode 100644 index 0000000000..e5fc1423b0 --- /dev/null +++ 
b/deploy/aws/tidb-cluster/values/default.yaml @@ -0,0 +1,9 @@ +timezone: UTC + +pd: + logLevel: info +tikv: + logLevel: info + syncLog: true +tidb: + logLevel: info diff --git a/deploy/aws/tidb-cluster/variables.tf b/deploy/aws/tidb-cluster/variables.tf new file mode 100644 index 0000000000..c937c95341 --- /dev/null +++ b/deploy/aws/tidb-cluster/variables.tf @@ -0,0 +1,244 @@ +variable "subnets" { + description = "A list of subnets to place the EKS cluster and workers within." + type = list(string) +} + +variable "tags" { + description = "A map of tags to add to all resources." + type = map(string) + default = {} +} + +variable "worker_groups" { + description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys." + type = list(map(string)) + + default = [ + { + "name" = "default" + }, + ] +} + +variable "worker_group_count" { + description = "The number of maps contained within the worker_groups list." + type = string + default = "1" +} + +variable "workers_group_defaults" { + description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." + type = map(string) + default = {} +} + +variable "worker_group_tags" { + description = "A map defining extra tags to be applied to the worker group ASG." + type = map(list(string)) + + default = { + default = [] + } +} + +variable "worker_groups_launch_template" { + description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys." + type = list(map(string)) + + default = [ + { + "name" = "default" + }, + ] +} + +variable "worker_group_launch_template_count" { + description = "The number of maps contained within the worker_groups_launch_template list." + type = string + default = "0" +} + +variable "workers_group_launch_template_defaults" { + description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." + type = map(string) + default = {} +} + +variable "worker_group_launch_template_tags" { + description = "A map defining extra tags to be applied to the worker group template ASG." + type = map(list(string)) + + default = { + default = [] + } +} + +variable "worker_ami_name_filter" { + description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. \"v20190220\"" + default = "v*" +} + +variable "worker_additional_security_group_ids" { + description = "A list of additional security group ids to attach to worker instances" + type = list(string) + default = [] +} + +variable "worker_sg_ingress_from_port" { + description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)." + default = "1025" +} + +variable "workers_additional_policies" { + description = "Additional policies to be added to workers" + type = list(string) + default = [] +} + +variable "workers_additional_policies_count" { + default = 0 +} + +variable "kubeconfig_aws_authenticator_command" { + description = "Command to use to fetch AWS EKS credentials." + default = "aws-iam-authenticator" +} + +variable "kubeconfig_aws_authenticator_command_args" { + description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]." 
+ type = list(string) + default = [] +} + +variable "kubeconfig_aws_authenticator_additional_args" { + description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]." + type = list(string) + default = [] +} + +variable "kubeconfig_aws_authenticator_env_variables" { + description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}." + type = map(string) + default = {} +} + +variable "cluster_create_timeout" { + description = "Timeout value when creating the EKS cluster." + default = "15m" +} + +variable "cluster_delete_timeout" { + description = "Timeout value when deleting the EKS cluster." + default = "15m" +} + +variable "local_exec_interpreter" { + description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows Git Bash is a good choice." + type = list(string) + default = ["/bin/sh", "-c"] +} + +variable "cluster_create_security_group" { + description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`." + default = true +} + +variable "worker_create_security_group" { + description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`." + default = true +} + +variable "permissions_boundary" { + description = "If provided, all IAM roles will be created with this permissions boundary attached." + default = "" +} + +variable "iam_path" { + description = "If provided, all IAM roles will be created on this path." + default = "/" +} + +variable "cluster_endpoint_private_access" { + description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled." + default = false +} + +variable "cluster_endpoint_public_access" { + description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled." 
+ default = true +} + + + + +variable "operator_version" { + description = "tidb operator version" + default = "v1.0.0-beta.3" +} + +variable "cluster_name" { + type = string + description = "tidb cluster name" +} + +variable "cluster_version" { + type = string + default = "v3.0.0-rc.2" +} + +variable "ssh_key_name" { + type = string +} + +variable "pd_count" { + type = number + default = 1 +} + +variable "tikv_count" { + type = number + default = 1 +} + +variable "tidb_count" { + type = number + default = 1 +} + +variable "pd_instance_type" { + type = string + default = "c5d.large" +} + +variable "tikv_instance_type" { + type = string + default = "c5d.large" +} + +variable "tidb_instance_type" { + type = string + default = "c5d.large" +} + +variable "monitor_instance_type" { + type = string + default = "c5d.large" +} + +variable "monitor_storage_size" { + type = string + default = "100Gi" +} + +variable "monitor_enable_anonymous_user" { + type = bool + default = false +} + +variable "override_values" { + type = string +} + +variable "eks_info" { + description = "eks info" +} diff --git a/deploy/aws/tidb-cluster/workers.tf b/deploy/aws/tidb-cluster/workers.tf new file mode 100644 index 0000000000..59b8b24c15 --- /dev/null +++ b/deploy/aws/tidb-cluster/workers.tf @@ -0,0 +1,190 @@ +# Worker Groups using Launch Configurations + +resource "aws_autoscaling_group" "workers" { + name_prefix = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" + desired_capacity = lookup( + local.tidb_cluster_worker_groups[count.index], + "asg_desired_capacity", + local.workers_group_defaults["asg_desired_capacity"], + ) + max_size = lookup( + local.tidb_cluster_worker_groups[count.index], + "asg_max_size", + local.workers_group_defaults["asg_max_size"], + ) + min_size = lookup( + local.tidb_cluster_worker_groups[count.index], + "asg_min_size", + local.workers_group_defaults["asg_min_size"], + ) + force_delete = false + # target_group_arns = compact( + # split( + # ",", + # coalesce( + # lookup(local.tidb_cluster_worker_groups[count.index], "target_group_arns", ""), + # local.workers_group_defaults["target_group_arns"], + # ), + # ), + # ) + launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) + vpc_zone_identifier = split( + ",", + coalesce( + lookup(local.tidb_cluster_worker_groups[count.index], "subnets", ""), + local.workers_group_defaults["subnets"], + ), + ) + protect_from_scale_in = false + # suspended_processes = "" # A comma delimited string of processes to to suspend. i.e. AZRebalance,HealthCheck,ReplaceUnhealthy + # enabled_metrics = "" # A comma delimited list of metrics to be collected i.e. GroupMinSize,GroupMaxSize,GroupDesiredCapacity + count = local.worker_group_count + placement_group = "" # The name of the placement group into which to launch the instances, if any. + + tags = concat( + [ + { + "key" = "Name" + "value" = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}-eks_asg" + "propagate_at_launch" = true + }, + { + "key" = "kubernetes.io/cluster/${var.eks_info.name}" + "value" = "owned" + "propagate_at_launch" = true + }, + { + "key" = "k8s.io/cluster-autoscaler/${lookup( + local.tidb_cluster_worker_groups[count.index], + "autoscaling_enabled", + local.workers_group_defaults["autoscaling_enabled"], + ) == 1 ? 
"enabled" : "disabled"}" + "value" = "true" + "propagate_at_launch" = false + }, + # { + # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" + # "value" = "" + # "propagate_at_launch" = false + # }, + { + "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + "value" = "${lookup( + local.tidb_cluster_worker_groups[count.index], + "root_volume_size", + local.workers_group_defaults["root_volume_size"], + )}Gi" + "propagate_at_launch" = false + }, + ], + local.asg_tags, + var.worker_group_tags[contains( + keys(var.worker_group_tags), + lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index), + ) ? lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index) : "default"], + ) + + + lifecycle { + create_before_destroy = true + # ignore_changes = ["desired_capacity"] + } +} + +resource "aws_launch_configuration" "workers" { + name_prefix = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" + associate_public_ip_address = lookup( + local.tidb_cluster_worker_groups[count.index], + "public_ip", + local.workers_group_defaults["public_ip"], + ) + security_groups = concat([var.eks_info.worker_security_group_id], var.worker_additional_security_group_ids, compact( + split( + ",", + lookup( + local.tidb_cluster_worker_groups[count.index], + "additional_security_group_ids", + local.workers_group_defaults["additional_security_group_ids"], + ), + ), + )) + iam_instance_profile = element(var.eks_info.worker_iam_instance_profile.*.id, count.index) + image_id = lookup( + local.tidb_cluster_worker_groups[count.index], + "ami_id", + local.workers_group_defaults["ami_id"], + ) + instance_type = lookup( + local.tidb_cluster_worker_groups[count.index], + "instance_type", + local.workers_group_defaults["instance_type"], + ) + key_name = lookup( + local.tidb_cluster_worker_groups[count.index], + "key_name", + local.workers_group_defaults["key_name"], + ) + user_data_base64 = base64encode(element(data.template_file.userdata.*.rendered, count.index)) + ebs_optimized = lookup( + local.tidb_cluster_worker_groups[count.index], + "ebs_optimized", + lookup( + local.ebs_optimized, + lookup( + local.tidb_cluster_worker_groups[count.index], + "instance_type", + local.workers_group_defaults["instance_type"], + ), + false, + ), + ) + enable_monitoring = lookup( + local.tidb_cluster_worker_groups[count.index], + "enable_monitoring", + local.workers_group_defaults["enable_monitoring"], + ) + spot_price = lookup( + local.tidb_cluster_worker_groups[count.index], + "spot_price", + local.workers_group_defaults["spot_price"], + ) + placement_tenancy = lookup( + local.tidb_cluster_worker_groups[count.index], + "placement_tenancy", + local.workers_group_defaults["placement_tenancy"], + ) + count = local.worker_group_count + + lifecycle { + create_before_destroy = true + } + + root_block_device { + volume_size = lookup( + local.tidb_cluster_worker_groups[count.index], + "root_volume_size", + local.workers_group_defaults["root_volume_size"], + ) + volume_type = lookup( + local.tidb_cluster_worker_groups[count.index], + "root_volume_type", + local.workers_group_defaults["root_volume_type"], + ) + iops = lookup( + local.tidb_cluster_worker_groups[count.index], + "root_iops", + local.workers_group_defaults["root_iops"], + ) + delete_on_termination = true + } +} + +resource "null_resource" "tags_as_list_of_maps" { + count = length(keys(var.tags)) + + triggers = { + key = element(keys(var.tags), count.index) + value = 
element(values(var.tags), count.index) + propagate_at_launch = "true" + } +} diff --git a/deploy/aws/tidb-cluster/workers_launch_template.tf b/deploy/aws/tidb-cluster/workers_launch_template.tf new file mode 100644 index 0000000000..cd5205f84a --- /dev/null +++ b/deploy/aws/tidb-cluster/workers_launch_template.tf @@ -0,0 +1,342 @@ +# Worker Groups using Launch Templates + +resource "aws_autoscaling_group" "workers_launch_template" { + name_prefix = "${var.eks_info.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}" + desired_capacity = lookup( + var.worker_groups_launch_template[count.index], + "asg_desired_capacity", + local.workers_group_launch_template_defaults["asg_desired_capacity"], + ) + max_size = lookup( + var.worker_groups_launch_template[count.index], + "asg_max_size", + local.workers_group_launch_template_defaults["asg_max_size"], + ) + min_size = lookup( + var.worker_groups_launch_template[count.index], + "asg_min_size", + local.workers_group_launch_template_defaults["asg_min_size"], + ) + force_delete = lookup( + var.worker_groups_launch_template[count.index], + "asg_force_delete", + local.workers_group_launch_template_defaults["asg_force_delete"], + ) + # target_group_arns = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "target_group_arns", + # "", + # ), + # local.workers_group_launch_template_defaults["target_group_arns"], + # ), + # ), + # ) + + mixed_instances_policy { + instances_distribution { + on_demand_allocation_strategy = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_allocation_strategy", + local.workers_group_launch_template_defaults["on_demand_allocation_strategy"], + ) + on_demand_base_capacity = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_base_capacity", + local.workers_group_launch_template_defaults["on_demand_base_capacity"], + ) + on_demand_percentage_above_base_capacity = lookup( + var.worker_groups_launch_template[count.index], + "on_demand_percentage_above_base_capacity", + local.workers_group_launch_template_defaults["on_demand_percentage_above_base_capacity"], + ) + spot_allocation_strategy = lookup( + var.worker_groups_launch_template[count.index], + "spot_allocation_strategy", + local.workers_group_launch_template_defaults["spot_allocation_strategy"], + ) + spot_instance_pools = lookup( + var.worker_groups_launch_template[count.index], + "spot_instance_pools", + local.workers_group_launch_template_defaults["spot_instance_pools"], + ) + spot_max_price = lookup( + var.worker_groups_launch_template[count.index], + "spot_max_price", + local.workers_group_launch_template_defaults["spot_max_price"], + ) + } + + launch_template { + launch_template_specification { + launch_template_id = element( + aws_launch_template.workers_launch_template.*.id, + count.index, + ) + version = "$Latest" + } + + override { + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ) + } + + override { + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "override_instance_type", + local.workers_group_launch_template_defaults["override_instance_type"], + ) + } + } + } + + vpc_zone_identifier = split( + ",", + coalesce( + lookup( + var.worker_groups_launch_template[count.index], + "subnets", + "", + ), + local.workers_group_launch_template_defaults["subnets"], + ), + ) + protect_from_scale_in 
= lookup( + var.worker_groups_launch_template[count.index], + "protect_from_scale_in", + local.workers_group_launch_template_defaults["protect_from_scale_in"], + ) + # suspended_processes = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "suspended_processes", + # "", + # ), + # local.workers_group_launch_template_defaults["suspended_processes"], + # ), + # ), + # ) + # enabled_metrics = compact( + # split( + # ",", + # coalesce( + # lookup( + # var.worker_groups_launch_template[count.index], + # "enabled_metrics", + # "", + # ), + # local.workers_group_launch_template_defaults["enabled_metrics"], + # ), + # ), + # ) + count = var.worker_group_launch_template_count + + tags = concat( + [ + { + "key" = "Name" + "value" = "${var.eks_info.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}-eks_asg" + "propagate_at_launch" = true + }, + { + "key" = "kubernetes.io/cluster/${var.eks_info.name}" + "value" = "owned" + "propagate_at_launch" = true + }, + { + "key" = "k8s.io/cluster-autoscaler/${lookup( + var.worker_groups_launch_template[count.index], + "autoscaling_enabled", + local.workers_group_launch_template_defaults["autoscaling_enabled"], + ) == 1 ? "enabled" : "disabled"}" + "value" = "true" + "propagate_at_launch" = false + }, + # { + # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" + # "value" = "" + # "propagate_at_launch" = false + # }, + { + "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + "value" = "${lookup( + var.worker_groups_launch_template[count.index], + "root_volume_size", + local.workers_group_launch_template_defaults["root_volume_size"], + )}Gi" + "propagate_at_launch" = false + }, + ], + local.asg_tags, + var.worker_group_launch_template_tags[contains( + keys(var.worker_group_launch_template_tags), + lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + ), + ) ? 
lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + ) : "default"], + ) + + lifecycle { + create_before_destroy = true + + ignore_changes = [desired_capacity] + } +} + +resource "aws_launch_template" "workers_launch_template" { + name_prefix = "${var.eks_info.name}-${lookup( + var.worker_groups_launch_template[count.index], + "name", + count.index, + )}" + + network_interfaces { + associate_public_ip_address = lookup( + var.worker_groups_launch_template[count.index], + "public_ip", + local.workers_group_launch_template_defaults["public_ip"], + ) + security_groups = concat([var.eks_info.worker_security_group_id], var.worker_additional_security_group_ids, compact( + split( + ",", + lookup( + var.worker_groups_launch_template[count.index], + "additional_security_group_ids", + local.workers_group_launch_template_defaults["additional_security_group_ids"], + ), + ), + )) + } + + iam_instance_profile { + name = element( + aws_iam_instance_profile.workers_launch_template.*.name, + count.index, + ) + } + + image_id = lookup( + var.worker_groups_launch_template[count.index], + "ami_id", + local.workers_group_launch_template_defaults["ami_id"], + ) + instance_type = lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ) + key_name = lookup( + var.worker_groups_launch_template[count.index], + "key_name", + local.workers_group_launch_template_defaults["key_name"], + ) + user_data = base64encode( + element( + data.template_file.launch_template_userdata.*.rendered, + count.index, + ), + ) + ebs_optimized = lookup( + var.worker_groups_launch_template[count.index], + "ebs_optimized", + lookup( + local.ebs_optimized, + lookup( + var.worker_groups_launch_template[count.index], + "instance_type", + local.workers_group_launch_template_defaults["instance_type"], + ), + false, + ), + ) + + monitoring { + enabled = lookup( + var.worker_groups_launch_template[count.index], + "enable_monitoring", + local.workers_group_launch_template_defaults["enable_monitoring"], + ) + } + + placement { + tenancy = lookup( + var.worker_groups_launch_template[count.index], + "placement_tenancy", + local.workers_group_launch_template_defaults["placement_tenancy"], + ) + } + + count = var.worker_group_launch_template_count + + lifecycle { + create_before_destroy = true + } + + block_device_mappings { + device_name = data.aws_ami.eks_worker.root_device_name + + ebs { + volume_size = lookup( + var.worker_groups_launch_template[count.index], + "root_volume_size", + local.workers_group_launch_template_defaults["root_volume_size"], + ) + volume_type = lookup( + var.worker_groups_launch_template[count.index], + "root_volume_type", + local.workers_group_launch_template_defaults["root_volume_type"], + ) + iops = lookup( + var.worker_groups_launch_template[count.index], + "root_iops", + local.workers_group_launch_template_defaults["root_iops"], + ) + encrypted = lookup( + var.worker_groups_launch_template[count.index], + "root_encrypted", + local.workers_group_launch_template_defaults["root_encrypted"], + ) + kms_key_id = lookup( + var.worker_groups_launch_template[count.index], + "kms_key_id", + local.workers_group_launch_template_defaults["kms_key_id"], + ) + delete_on_termination = true + } + } +} + +resource "aws_iam_instance_profile" "workers_launch_template" { + name_prefix = var.eks_info.name + role = lookup( + var.worker_groups_launch_template[count.index], + "iam_role_id", + 
local.workers_group_launch_template_defaults["iam_role_id"], + ) + count = var.worker_group_launch_template_count + path = var.iam_path +} diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index d250eb9e6d..c93a5d4782 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -1,111 +1,57 @@ variable "region" { description = "aws region" - default = "us-east-2" -} - -variable "ingress_cidr" { - description = "IP cidr that allowed to access bastion ec2 instance" - default = ["0.0.0.0/0"] # Note: Please restrict your ingress to only necessary IPs. Opening to 0.0.0.0/0 can lead to security vulnerabilities. + # supported regions: + # US: us-east-1, us-east-2, us-west-2 + # Asia Pacific: ap-south-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1 + # Europe: eu-central-1, eu-west-1, eu-west-2, eu-west-3, eu-north-1 + default = "us-west-2" } variable "create_vpc" { description = "Create a new VPC or not, if true the vpc_cidr/private_subnets/public_subnets must be set correctly, otherwise vpc_id/subnet_ids must be set correctly" - default = true + default = true } variable "vpc_cidr" { description = "vpc cidr" - default = "10.0.0.0/16" + default = "10.0.0.0/16" } variable "private_subnets" { description = "vpc private subnets" - type = "list" - default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + type = list(string) + default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] } variable "public_subnets" { description = "vpc public subnets" - type = "list" - default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + type = list(string) + default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] } variable "vpc_id" { description = "VPC id" - type = "string" - default = "vpc-c679deae" + type = string + default = "vpc-c679deae" } variable "subnets" { description = "subnet id list" - type = "list" - default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] -} - -variable "create_bastion" { - description = "Create bastion ec2 instance to access TiDB cluster" - default = true -} - -variable "bastion_ami" { - description = "bastion ami id" - default = "ami-0cd3dfa4e37921605" -} - -variable "bastion_instance_type" { - description = "bastion ec2 instance type" - default = "t2.micro" -} - -variable "cluster_name" { - description = "eks cluster name" - default = "my-cluster" -} - -variable "k8s_version" { - description = "eks cluster version" - default = "1.12" -} - -variable "tidb_version" { - description = "tidb cluster version" - default = "v3.0.0-rc.1" -} - -variable "pd_count" { - default = 3 -} - -variable "tikv_count" { - default = 3 -} - -variable "tidb_count" { - default = 2 -} - -// Be careful about changing the instance types, it may break the user data and local volume setup -variable "pd_instance_type" { - default = "m5d.xlarge" -} - -variable "tikv_instance_type" { - default = "i3.2xlarge" -} - -variable "tidb_instance_type" { - default = "c4.4xlarge" + type = list(string) + default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] } -variable "monitor_instance_type" { - default = "c5.xlarge" +variable "eks_name" { + description = "Name of the EKS cluster. Also used as a prefix in names of related resources." + default = "my-cluster" } -variable "tikv_root_volume_size" { - default = "100" +variable "eks_version" { + description = "Kubernetes version to use for the EKS cluster." 
+ default = "1.12" } -variable "monitor_enable_anonymous_user" { - description = "Whether enabling anonymous user visiting for monitoring" - default = false +variable "operator_version" { + description = "tidb operator version" + default = "v1.0.0-beta.3" } From 192a3a94a28508a64d839a7df0861bab0fe56be5 Mon Sep 17 00:00:00 2001 From: Aylei Date: Sat, 29 Jun 2019 22:39:28 +0800 Subject: [PATCH 02/11] Multiple cluster management in AWS Signed-off-by: Aylei --- deploy/aws/README.md | 113 +++++++++++++++--- deploy/aws/aws-tutorial.tfvars | 10 ++ deploy/aws/bastion.tf | 33 +++++ deploy/aws/clusters.tf | 100 ++++++---------- deploy/aws/eks/cluster.tf | 32 +---- deploy/aws/eks/{aws_auth.tf => initialize.tf} | 7 +- deploy/aws/eks/local.tf | 29 ++--- .../manifests/local-volume-provisioner.yaml | 15 +-- deploy/aws/eks/variables.tf | 4 +- deploy/aws/eks/workers.tf | 26 ++-- deploy/aws/eks/workers_launch_template.tf | 37 ++---- deploy/aws/local.tf | 5 + deploy/aws/main.tf | 11 +- deploy/aws/outputs.tf | 35 ++---- deploy/aws/pd-userdata.sh | 2 + deploy/aws/tidb-cluster/cluster.tf | 99 +++++++++------ deploy/aws/tidb-cluster/data.tf | 6 +- deploy/aws/tidb-cluster/local.tf | 7 +- deploy/aws/tidb-cluster/outputs.tf | 4 +- deploy/aws/tidb-cluster/pre_userdata | 30 +++++ deploy/aws/tidb-cluster/values/default.yaml | 9 -- deploy/aws/tidb-cluster/variables.tf | 96 +++------------ deploy/aws/tidb-cluster/workers.tf | 24 ++-- .../tidb-cluster/workers_launch_template.tf | 24 ++-- deploy/aws/tikv-userdata.sh | 2 + deploy/aws/userdata.sh | 2 + deploy/aws/values/default.yaml | 24 ++++ deploy/aws/variables.tf | 88 +++++++++++--- 28 files changed, 480 insertions(+), 394 deletions(-) create mode 100644 deploy/aws/aws-tutorial.tfvars create mode 100644 deploy/aws/bastion.tf rename deploy/aws/eks/{aws_auth.tf => initialize.tf} (92%) create mode 100644 deploy/aws/local.tf create mode 100644 deploy/aws/tidb-cluster/pre_userdata delete mode 100644 deploy/aws/tidb-cluster/values/default.yaml create mode 100644 deploy/aws/values/default.yaml diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 65aaecb9a2..b28a8afe8b 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -41,10 +41,10 @@ Before deploying a TiDB cluster on AWS EKS, make sure the following requirements The default setup will create a new VPC and a t2.micro instance as bastion machine, and an EKS cluster with the following ec2 instances as worker nodes: -* 3 m5d.xlarge instances for PD -* 3 i3.2xlarge instances for TiKV -* 2 c4.4xlarge instances for TiDB -* 1 c5.xlarge instance for monitor +* 3 m5.large instances for PD +* 3 c5d.4xlarge instances for TiKV +* 2 c5.4xlarge instances for TiDB +* 1 c5.2xlarge instance for monitor Use the following commands to set up the cluster: @@ -76,7 +76,7 @@ monitor_endpoint = http://abd299cc47af411e98aae02938da0762-1989524000.us-east-2. region = us-east-2 tidb_dns = abd2e3f7c7af411e98aae02938da0762-17499b76b312be02.elb.us-east-2.amazonaws.com tidb_port = 4000 -tidb_version = v3.0.0-rc.1 +tidb_version = v3.0.0 ``` > **Note:** You can use the `terraform output` command to get the output again. 
@@ -86,7 +86,7 @@ tidb_version = v3.0.0-rc.1 To access the deployed TiDB cluster, use the following commands to first `ssh` into the bastion machine, and then connect it via MySQL client (replace the `<>` parts with values from the output): ``` shell -ssh -i credentials/k8s-prod-.pem ec2-user@ +ssh -i credentials/.pem ec2-user@ mysql -h -P -u root ``` @@ -118,12 +118,12 @@ The initial Grafana login credentials are: To upgrade the TiDB cluster, edit the `variables.tf` file with your preferred text editor and modify the `tidb_version` variable to a higher version, and then run `terraform apply`. -For example, to upgrade the cluster to version 3.0.0-rc.1, modify the `tidb_version` to `v3.0.0-rc.2`: +For example, to upgrade the cluster to version 3.0.0, modify the `tidb_version` to `v3.0.0`: ``` variable "tidb_version" { description = "tidb cluster version" - default = "v3.0.0-rc.2" + default = "v3.0.0" } ``` @@ -131,12 +131,12 @@ For example, to upgrade the cluster to version 3.0.0-rc.1, modify the `tidb_vers ## Scale -To scale the TiDB cluster, edit the `variables.tf` file with your preferred text editor and modify the `tikv_count` or `tidb_count` variable to your desired count, and then run `terraform apply`. +To scale the TiDB cluster, edit the `variables.tf` file with your preferred text editor and modify the `default_cluster_tikv_count` or `default_cluster_tidb_count` variable to your desired count, and then run `terraform apply`. For example, to scale out the cluster, you can modify the number of TiDB instances from 2 to 3: ``` - variable "tidb_count" { + variable "default_cluster_tidb_count" { default = 4 } ``` @@ -145,7 +145,7 @@ For example, to scale out the cluster, you can modify the number of TiDB instanc ## Customize -You can change default values in `variables.tf` (such as the cluster name and image versions) as needed. +You can change default values in `variables.tf` (such as the default cluster name and image versions) as needed. ### Customize AWS related resources @@ -161,10 +161,83 @@ Currently, the instance type of TiDB cluster component is not configurable becau ### Customize TiDB parameters -Currently, there are not many customizable TiDB parameters. And there are two ways to customize the parameters: +By default, the terraform script will pass `./values/default.yaml` to the tidb-cluster helm chart. You can change the `override_values` of the tidb-cluster module to specify a customized values file. + +The reference for the values file can be found [here]() + +## Multiple Cluster Management + +An instance of the `./tidb-cluster` module corresponds to a TiDB cluster in the EKS cluster.
If you want to add a new TiDB cluster, you can edit `./clusters.tf` and add a new instance of the `./tidb-cluster` module: + +```hcl +module "example-cluster" { + source = "./tidb-cluster" + + # The target EKS, required + eks_info = local.default_eks + # The subnets of node pools of this TiDB cluster, required + subnets = local.default_subnets + + # TiDB cluster name, required + cluster_name = "example-cluster" + # Helm values file, required + override_values = "values/example-cluster.yaml" + + # TiDB cluster version + cluster_version = "v3.0.0" + # SSH key of cluster nodes + ssh_key_name = module.key-pair.key_name + # PD replica number + pd_count = 3 + # PD instance type + pd_instance_type = "t2.xlarge" + # The storage class used by PD + pd_storage_class = "ebs-gp2" + # TiKV replica number + tikv_count = 3 + # TiKV instance type + tikv_instance_type = "t2.xlarge" + # The storage class used by TiKV. If the TiKV instance type does not have a local SSD, you should change it to a storage class + # backed by cloud disks such as 'ebs-gp2'. Note that running TiKV without local storage is strongly discouraged in production environments. + tikv_storage_class = "local-storage" + # TiDB replica number + tidb_count = 2 + # TiDB instance type + tidb_instance_type = "t2.xlarge" + # Monitor instance type + monitor_instance_type = "t2.xlarge" + # The version of the tidb-cluster helm chart + tidb_cluster_chart_version = "v1.0.0-beta.3" +} + +module "other-cluster" { + source = "./tidb-cluster" + + cluster_name = "other-cluster" + override_values = "values/other-cluster.yaml" + #...... +} +``` + +> **Note:** +> +> The `cluster_name` of each cluster must be unique. + +You can refer to [./tidb-cluster/variables.tf](./tidb-cluster/variables.tf) for the complete configuration reference of the `./tidb-cluster` module. + +It is recommended to provide a dedicated values file for each TiDB cluster for ease of management. You can copy `values/default.yaml` to get a reasonable default. + +You can get the DNS names of the TiDB service and the Grafana service via kubectl. If you want Terraform to print this information as it does for the `default-cluster`, you can add `output` sections in `outputs.tf`: -* Before deploying the cluster, you can directly modify the `templates/tidb-cluster-values.yaml.tpl` file and then deploy the cluster with customized configs. -* After the cluster is running, you must run `terraform apply` again every time you make changes to the `templates/tidb-cluster-values.yaml.tpl` file, or the cluster will still be using old configs. +```hcl +output "example-cluster_tidb-dns" { + value = module.example-cluster.tidb_dns +} + +output "example-cluster_monitor-dns" { + value = module.example-cluster.monitor_dns +} +``` ## Destroy @@ -174,4 +247,14 @@ It may take some while to finish destroying the cluster. $ terraform destroy ``` -> **Note:** You have to manually delete the EBS volumes in AWS console after running `terraform destroy` if you do not need the data on the volumes anymore. +> **Note:** +> +> This will destroy your EKS cluster along with all the TiDB clusters you deployed on it. + +> **Note:** +> +> If you specify service type `LoadBalancer` for the services, as the default configuration does, you have to delete these services before running `terraform destroy`, otherwise they will prevent the subnets from being destroyed. + +> **Note:** +> +> You have to manually delete the EBS volumes in the AWS console after running `terraform destroy` if you do not need the data on the volumes anymore.
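Since the `default-cluster` module is wired to the top-level `default_cluster_*` variables, the scaling and customization steps above can also be driven from a tfvars file instead of editing `variables.tf` in place. The following is a minimal sketch under the assumption that the variable names match those referenced in the Scale section; the file name and values are illustrative (compare the `aws-tutorial.tfvars` added below):

```hcl
# my-cluster.tfvars -- hypothetical override file, applied with:
#   terraform apply -var-file=my-cluster.tfvars
default_cluster_name       = "my-cluster"
default_cluster_tidb_count = 3
default_cluster_tikv_count = 3
```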
diff --git a/deploy/aws/aws-tutorial.tfvars b/deploy/aws/aws-tutorial.tfvars new file mode 100644 index 0000000000..d855acf5f2 --- /dev/null +++ b/deploy/aws/aws-tutorial.tfvars @@ -0,0 +1,10 @@ +default_cluster_pd_instance_type = "c5d.large" +default_cluster_tikv_instance_type = "c5d.large" +default_cluster_tidb_instance_type = "c4.large" +default_cluster_monitor_instance_type = "c5.large" + +default_cluster_pd_count = 1 +default_cluster_tikv_count = 1 +default_cluster_tidb_count = 1 + +default_cluster_name = "aws-tutorial" diff --git a/deploy/aws/bastion.tf b/deploy/aws/bastion.tf new file mode 100644 index 0000000000..23ad7c1734 --- /dev/null +++ b/deploy/aws/bastion.tf @@ -0,0 +1,33 @@ +resource "aws_security_group" "ssh" { + name = "${var.eks_name}-bastion" + description = "Allow SSH access for bastion instance" + vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = var.bastion_ingress_cidr + } + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + + version = "2.3.0" + name = "${var.eks_name}-bastion" + instance_count = var.create_bastion ? 1 : 0 + ami = data.aws_ami.amazon-linux-2.id + instance_type = var.bastion_instance_type + key_name = module.key-pair.key_name + associate_public_ip_address = true + monitoring = false + user_data = file("bastion-userdata") + vpc_security_group_ids = [aws_security_group.ssh.id] + subnet_ids = local.default_subnets +} \ No newline at end of file diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf index 5c56a766a4..50fda10c27 100644 --- a/deploy/aws/clusters.tf +++ b/deploy/aws/clusters.tf @@ -1,69 +1,39 @@ -module "demo-cluster" { - source = "./tidb-cluster" - eks_info = module.eks.eks_info - subnets = split( - ",", - var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), - ) +# TiDB cluster declaration example +#module "example-cluster" { +# source = "./tidb-cluster" +# eks_info = local.default_eks +# subnets = local.default_subnets +# +# # NOTE: cluster_name cannot be changed after creation +# cluster_name = "demo-cluster" +# cluster_version = "v3.0.0" +# ssh_key_name = module.key-pair.key_name +# pd_count = 1 +# pd_instance_type = "t2.xlarge" +# tikv_count = 1 +# tikv_instance_type = "t2.xlarge" +# tidb_count = 1 +# tidb_instance_type = "t2.xlarge" +# monitor_instance_type = "t2.xlarge" +# # yaml file that is passed to helm to customize the release +# override_values = "values/default.yaml" +#} - cluster_name = "demo-cluster" - cluster_version = "v3.0.0-rc.2" - ssh_key_name = module.key-pair.key_name - pd_count = 1 - pd_instance_type = "t2.xlarge" - tikv_count = 1 - tikv_instance_type = "t2.xlarge" - tidb_count = 1 - tidb_instance_type = "t2.xlarge" - monitor_instance_type = "t2.xlarge" - monitor_storage_size = "100Gi" - monitor_enable_anonymous_user = true - override_values = "values/default.yaml" -} - -module "test-cluster" { - source = "./tidb-cluster" - eks_info = module.eks.eks_info - subnets = split( - ",", - var.create_vpc ? 
join(",", module.vpc.private_subnets) : join(",", var.subnets), - ) - - cluster_name = "test-cluster" - cluster_version = "v3.0.0-rc.1" - ssh_key_name = module.key-pair.key_name - pd_count = 1 - pd_instance_type = "t2.xlarge" - tikv_count = 1 - tikv_instance_type = "t2.xlarge" - tidb_count = 1 - tidb_instance_type = "t2.xlarge" - monitor_instance_type = "t2.xlarge" - monitor_storage_size = "100Gi" - monitor_enable_anonymous_user = true - override_values = "values/default.yaml" -} -module "prod-cluster" { - source = "./tidb-cluster" - eks_info = module.eks.eks_info - subnets = ["subnet-0043bd7c0ce42020b"] - # subnets = split( - # ",", - # var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), - # ) +module "default-cluster" { + source = "./tidb-cluster" + eks_info = local.default_eks + subnets = local.default_subnets - cluster_name = "prod-cluster" - cluster_version = "v3.0.0-rc.1" - ssh_key_name = module.key-pair.key_name - pd_count = 1 - pd_instance_type = "t2.xlarge" - tikv_count = 3 - tikv_instance_type = "t2.xlarge" - tidb_count = 1 - tidb_instance_type = "t2.xlarge" - monitor_instance_type = "t2.xlarge" - monitor_storage_size = "100Gi" - monitor_enable_anonymous_user = true - override_values = "values/default.yaml" + cluster_name = var.default_cluster_name + cluster_version = var.default_cluster_version + ssh_key_name = module.key-pair.key_name + pd_count = var.default_cluster_pd_count + pd_instance_type = var.default_cluster_pd_instance_type + tikv_count = var.default_cluster_tikv_count + tikv_instance_type = var.default_cluster_tidb_instance_type + tidb_count = var.default_cluster_tidb_count + tidb_instance_type = var.default_cluster_tidb_instance_type + monitor_instance_type = var.default_cluster_monitor_instance_type + override_values = "values/default.yaml" } diff --git a/deploy/aws/eks/cluster.tf b/deploy/aws/eks/cluster.tf index 90e84c4064..43d4abeb84 100644 --- a/deploy/aws/eks/cluster.tf +++ b/deploy/aws/eks/cluster.tf @@ -36,7 +36,7 @@ resource "aws_security_group" "cluster" { tags = merge( var.tags, { - "Name" = "${var.cluster_name}-eks_cluster_sg" + Name = "${var.cluster_name}-eks_cluster_sg" }, ) count = var.cluster_create_security_group ? 
1 : 0 @@ -81,33 +81,3 @@ resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" { policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" role = aws_iam_role.cluster.name } - -# resource "null_resource" "setup-env" { -# depends_on = [aws_eks_cluster.this] - -# # Always execute -# # triggers = { -# # deploy_timestamp = timestamp() -# # } - -# provisioner "local-exec" { -# working_dir = path.module -# command = < kube_config.yaml - echo "${null_resource.update_config_map_aws_auth[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml + echo "${null_resource.update_aws_auth_and_install_operator[0].triggers.kube_config_map_rendered}" > kube_config.yaml + echo "${null_resource.update_aws_auth_and_install_operator[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && break || sleep 10 done kubectl apply -f manifests/crd.yaml @@ -37,7 +37,6 @@ EOT } triggers = { - # timestamp = timestamp() kube_config_map_rendered = data.template_file.kubeconfig.rendered config_map_rendered = data.template_file.config_map_aws_auth.rendered endpoint = aws_eks_cluster.this.endpoint diff --git a/deploy/aws/eks/local.tf b/deploy/aws/eks/local.tf index e042be29c6..ab85bf732c 100644 --- a/deploy/aws/eks/local.tf +++ b/deploy/aws/eks/local.tf @@ -1,17 +1,9 @@ locals { asg_tags = null_resource.tags_as_list_of_maps.*.triggers - # Followed recommendation http://67bricks.com/blog/?p=85 - # to workaround terraform not supporting short circut evaluation - cluster_security_group_id = coalesce( - join("", aws_security_group.cluster.*.id), - var.cluster_security_group_id, - ) + cluster_security_group_id = var.cluster_security_group_id == "" ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id - worker_security_group_id = coalesce( - join("", aws_security_group.workers.*.id), - var.worker_security_group_id, - ) + worker_security_group_id = var.worker_security_group_id == "" ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id default_iam_role_id = element(concat(aws_iam_role.workers.*.id, [""]), 0) kubeconfig_name = var.kubeconfig_name == "" ? 
"eks_${var.cluster_name}" : var.kubeconfig_name @@ -112,15 +104,16 @@ locals { ) eks_info = { - name = aws_eks_cluster.this.name - version = var.cluster_version - endpoint = aws_eks_cluster.this.endpoint - ca = aws_eks_cluster.this.certificate_authority[0].data - kubeconfig = data.template_file.kubeconfig.rendered - worker_iam_role = aws_iam_role.workers - worker_security_group_id = local.worker_security_group_id + name = aws_eks_cluster.this.name + version = var.cluster_version + endpoint = aws_eks_cluster.this.endpoint + ca = aws_eks_cluster.this.certificate_authority[0].data + kubeconfig = data.template_file.kubeconfig.rendered + kubeconfig_file = "${var.config_output_path}kubeconfig_${var.cluster_name}" + worker_iam_role = aws_iam_role.workers + worker_security_group_id = local.worker_security_group_id worker_iam_instance_profile = aws_iam_instance_profile.workers - vpc_id = var.vpc_id + vpc_id = var.vpc_id } ebs_optimized = { diff --git a/deploy/aws/eks/manifests/local-volume-provisioner.yaml b/deploy/aws/eks/manifests/local-volume-provisioner.yaml index a20799869b..f0f11e772a 100644 --- a/deploy/aws/eks/manifests/local-volume-provisioner.yaml +++ b/deploy/aws/eks/manifests/local-volume-provisioner.yaml @@ -14,8 +14,8 @@ metadata: data: storageClassMap: | local-storage: - hostDir: /mnt/disks - mountDir: /mnt/disks + hostDir: /mnt/local-ssd + mountDir: /mnt/local-ssd --- apiVersion: extensions/v1beta1 @@ -36,12 +36,7 @@ spec: spec: tolerations: - key: dedicated - operator: Equal - value: pd - effect: "NoSchedule" - - key: dedicated - operator: Equal - value: tikv + operator: Exists effect: "NoSchedule" serviceAccountName: local-storage-admin containers: @@ -74,7 +69,7 @@ spec: # mounting /dev in DinD environment would fail # - mountPath: /dev # name: provisioner-dev - - mountPath: /mnt/disks + - mountPath: /mnt/local-ssd name: local-disks mountPropagation: "HostToContainer" volumes: @@ -86,7 +81,7 @@ spec: # path: /dev - name: local-disks hostPath: - path: /mnt/disks + path: /mnt/local-ssd --- apiVersion: v1 diff --git a/deploy/aws/eks/variables.tf b/deploy/aws/eks/variables.tf index a4aa09340f..9a54967a01 100644 --- a/deploy/aws/eks/variables.tf +++ b/deploy/aws/eks/variables.tf @@ -94,7 +94,7 @@ variable "worker_groups" { default = [ { - "name" = "default" + name = "default" }, ] } @@ -126,7 +126,7 @@ variable "worker_groups_launch_template" { default = [ { - "name" = "default" + name = "default" }, ] } diff --git a/deploy/aws/eks/workers.tf b/deploy/aws/eks/workers.tf index 93c1279db0..d5d7c52573 100644 --- a/deploy/aws/eks/workers.tf +++ b/deploy/aws/eks/workers.tf @@ -72,23 +72,23 @@ resource "aws_autoscaling_group" "workers" { tags = concat( [ { - "key" = "Name" - "value" = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}-eks_asg" - "propagate_at_launch" = true + key = "Name" + value = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}-eks_asg" + propagate_at_launch = true }, { - "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" - "value" = "owned" - "propagate_at_launch" = true + key = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + value = "owned" + propagate_at_launch = true }, { - "key" = "k8s.io/cluster-autoscaler/${lookup( + key = "k8s.io/cluster-autoscaler/${lookup( local.control_worker_groups[count.index], "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"], ) == 1 ? 
"enabled" : "disabled"}" - "value" = "true" - "propagate_at_launch" = false + value = "true" + propagate_at_launch = false }, # { # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" @@ -96,13 +96,13 @@ resource "aws_autoscaling_group" "workers" { # "propagate_at_launch" = false # }, { - "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - "value" = "${lookup( + key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + value = "${lookup( local.control_worker_groups[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"], )}Gi" - "propagate_at_launch" = false + propagate_at_launch = false }, ], local.asg_tags, @@ -215,7 +215,7 @@ resource "aws_security_group" "workers" { tags = merge( var.tags, { - "Name" = "${aws_eks_cluster.this.name}-eks_worker_sg" + Name = "${aws_eks_cluster.this.name}-eks_worker_sg" "kubernetes.io/cluster/${aws_eks_cluster.this.name}" = "owned" }, ) diff --git a/deploy/aws/eks/workers_launch_template.tf b/deploy/aws/eks/workers_launch_template.tf index ec35958816..5d99dd2d0f 100644 --- a/deploy/aws/eks/workers_launch_template.tf +++ b/deploy/aws/eks/workers_launch_template.tf @@ -26,19 +26,6 @@ resource "aws_autoscaling_group" "workers_launch_template" { "asg_force_delete", local.workers_group_launch_template_defaults["asg_force_delete"], ) - # target_group_arns = compact( - # split( - # ",", - # coalesce( - # lookup( - # var.worker_groups_launch_template[count.index], - # "target_group_arns", - # "", - # ), - # local.workers_group_launch_template_defaults["target_group_arns"], - # ), - # ), - # ) mixed_instances_policy { instances_distribution { @@ -148,27 +135,27 @@ resource "aws_autoscaling_group" "workers_launch_template" { tags = concat( [ { - "key" = "Name" - "value" = "${aws_eks_cluster.this.name}-${lookup( + key = "Name" + value = "${aws_eks_cluster.this.name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, )}-eks_asg" - "propagate_at_launch" = true + propagate_at_launch = true }, { - "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" - "value" = "owned" - "propagate_at_launch" = true + key = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + value = "owned" + propagate_at_launch = true }, { - "key" = "k8s.io/cluster-autoscaler/${lookup( + key = "k8s.io/cluster-autoscaler/${lookup( var.worker_groups_launch_template[count.index], "autoscaling_enabled", local.workers_group_launch_template_defaults["autoscaling_enabled"], ) == 1 ? "enabled" : "disabled"}" - "value" = "true" - "propagate_at_launch" = false + value = "true" + propagate_at_launch = false }, # { # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" @@ -176,13 +163,13 @@ resource "aws_autoscaling_group" "workers_launch_template" { # "propagate_at_launch" = false # }, { - "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - "value" = "${lookup( + key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + value = "${lookup( var.worker_groups_launch_template[count.index], "root_volume_size", local.workers_group_launch_template_defaults["root_volume_size"], )}Gi" - "propagate_at_launch" = false + propagate_at_launch = false }, ], local.asg_tags, diff --git a/deploy/aws/local.tf b/deploy/aws/local.tf new file mode 100644 index 0000000000..cc0421aa5c --- /dev/null +++ b/deploy/aws/local.tf @@ -0,0 +1,5 @@ +locals { + default_subnets = split(",", var.create_vpc ? 
join(",", module.vpc.private_subnets) : join(",", var.subnets)) + default_eks = module.eks.eks_info + kubeconfig = "${path.module}/credentials/kubeconfig-${var.eks_name}" +} \ No newline at end of file diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index af85dc7e25..62a4f7d77c 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -2,10 +2,6 @@ provider "aws" { region = var.region } -locals { - kubeconfig = "${path.module}/credentials/kubeconfig-${var.eks_name}" -} - module "key-pair" { source = "./aws-key-pair" name = var.eks_name @@ -45,11 +41,8 @@ module "eks" { operator_version = var.operator_version ssh_key_name = module.key-pair.key_name config_output_path = "credentials/" - subnets = split( - ",", - var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets), - ) - vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id + subnets = local.default_subnets + vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id tags = { app = "tidb" diff --git a/deploy/aws/outputs.tf b/deploy/aws/outputs.tf index 403588f541..73b80ba04f 100644 --- a/deploy/aws/outputs.tf +++ b/deploy/aws/outputs.tf @@ -13,37 +13,22 @@ output "eks_endpoint" { value = module.eks.cluster_endpoint } -output "demo-cluster_tidb-dns" { - description = "tidb service endpoints" - value = module.demo-cluster.tidb_dns +output "kubeconfig_filename" { + description = "The filename of the generated kubectl config." + value = module.eks.kubeconfig_filename } -output "demo-cluster_monitor-dns" { - description = "tidb service endpoint" - value = module.demo-cluster.monitor_dns -} - -output "test-cluster_tidb-dns" { +output "default-cluster_tidb-dns" { description = "tidb service endpoints" - value = module.test-cluster.tidb_dns + value = module.default-cluster.tidb_dns } -output "test-cluster_monitor-dns" { +output "default-cluster_monitor-dns" { description = "tidb service endpoint" - value = module.test-cluster.monitor_dns + value = module.default-cluster.monitor_dns } -output "prod-cluster_tidb-dns" { - description = "tidb service endpoints" - value = module.prod-cluster.tidb_dns +output "bastion_ip" { + description = "Bastion IP address" + value = module.ec2.public_ip } - -output "prod-cluster_monitor-dns" { - description = "tidb service endpoint" - value = module.prod-cluster.monitor_dns -} - -# output "bastion_ip" { -# description = "Bastion IP address" -# value = module.ec2.public_ip -# } diff --git a/deploy/aws/pd-userdata.sh b/deploy/aws/pd-userdata.sh index d45802cdd8..e2bfc401b7 100644 --- a/deploy/aws/pd-userdata.sh +++ b/deploy/aws/pd-userdata.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # set ulimits cat < /etc/security/limits.d/99-tidb.conf root soft nofile 1000000 diff --git a/deploy/aws/tidb-cluster/cluster.tf b/deploy/aws/tidb-cluster/cluster.tf index 1a53b8a90a..67200d437a 100644 --- a/deploy/aws/tidb-cluster/cluster.tf +++ b/deploy/aws/tidb-cluster/cluster.tf @@ -1,21 +1,25 @@ -resource "local_file" "kubeconfig" { - content = var.eks_info.kubeconfig - filename = "${path.module}/kubeconfig_${var.cluster_name}.yaml" -} +#resource "local_file" "kubeconfig_for_cleanup" { +# content = var.eks_info.kubeconfig +# filename = "${var.eks_info.kubeconfig_file}_for_${var.cluster_name}_cleanup" +#} resource "null_resource" "deploy-cluster" { - depends_on = [local_file.kubeconfig] provisioner "local-exec" { - working_dir = path.module + # EKS writes kube_config to path.cwd/kubeconfig_file + # Helm values files are managed in path.cwd + working_dir = path.cwd command = < 
kubeconfig_cleanup_${var.cluster_name} +#kubectl delete -n ${var.cluster_name} svc ${var.cluster_name}-pd +#kubectl delete -n ${var.cluster_name} svc ${var.cluster_name}-grafana +#kubectl get pvc -n ${var.cluster_name} -o jsonpath='{.items[*].spec.volumeName}'|fmt -1 | xargs -I {} kubectl patch pv {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}' +#kubectl delete pvc -n ${var.cluster_name} --all +#rm kubeconfig_cleanup_${var.cluster_name} +#EOT +# +# interpreter = var.local_exec_interpreter +# environment = { +# KUBECONFIG = "kubeconfig_cleanup_${var.cluster_name}" +# } +# } +#} diff --git a/deploy/aws/tidb-cluster/data.tf b/deploy/aws/tidb-cluster/data.tf index b45599faca..3e9b981ec3 100644 --- a/deploy/aws/tidb-cluster/data.tf +++ b/deploy/aws/tidb-cluster/data.tf @@ -74,10 +74,12 @@ data "template_file" "launch_template_userdata" { data "external" "tidb_elb" { depends_on = [null_resource.deploy-cluster] - program = ["bash", "-c", "kubectl --kubeconfig ${path.module}/kubeconfig_${var.cluster_name}.yaml get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] + working_dir = path.cwd + program = ["bash", "-c", "kubectl --kubeconfig ${var.eks_info.kubeconfig_file} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] } data "external" "monitor_elb" { depends_on = [null_resource.deploy-cluster] - program = ["bash", "-c", "kubectl --kubeconfig ${path.module}/kubeconfig_${var.cluster_name}.yaml get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] + working_dir = path.cwd + program = ["bash", "-c", "kubectl --kubeconfig ${var.eks_info.kubeconfig_file} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] } diff --git a/deploy/aws/tidb-cluster/local.tf b/deploy/aws/tidb-cluster/local.tf index 3eb8b493a1..e89d636979 100644 --- a/deploy/aws/tidb-cluster/local.tf +++ b/deploy/aws/tidb-cluster/local.tf @@ -60,11 +60,12 @@ locals { name = "${var.cluster_name}-tikv" key_name = var.ssh_key_name instance_type = var.tikv_instance_type - root_volume_size = "100" + root_volume_size = "50" public_ip = false kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule --node-labels=dedicated=${var.cluster_name}-tikv" asg_desired_capacity = var.tikv_count asg_max_size = var.tikv_count + 2 + pre_userdata = file("${path.module}/pre_userdata") # additional_userdata = file("userdata.sh") }, { @@ -72,7 +73,7 @@ locals { key_name = var.ssh_key_name instance_type = var.tidb_instance_type root_volume_type = "gp2" - root_volume_size = "100" + root_volume_size = "50" public_ip = false kubelet_extra_args = "--register-with-taints=dedicated=${var.cluster_name}-tidb:NoSchedule --node-labels=dedicated=${var.cluster_name}-tidb" asg_desired_capacity = var.tidb_count @@ -83,7 +84,7 @@ locals { key_name = var.ssh_key_name instance_type = var.monitor_instance_type root_volume_type = "gp2" - root_volume_size = "100" + root_volume_size = "50" public_ip = false asg_desired_capacity = 1 asg_max_size = 3 diff --git a/deploy/aws/tidb-cluster/outputs.tf b/deploy/aws/tidb-cluster/outputs.tf index 835f6f1fae..396e707f5c 100644 --- a/deploy/aws/tidb-cluster/outputs.tf +++ b/deploy/aws/tidb-cluster/outputs.tf @@ -1,7 +1,7 @@ output "tidb_dns" { - value = data.external.tidb_elb.result["hostname"] + value = lookup(data.external.tidb_elb.result, "hostname", "empty") } output "monitor_dns" { - 
value = data.external.monitor_elb.result["hostname"] + value = lookup(data.external.monitor_elb.result, "hostname", "emtpy") } diff --git a/deploy/aws/tidb-cluster/pre_userdata b/deploy/aws/tidb-cluster/pre_userdata new file mode 100644 index 0000000000..b601a7fa99 --- /dev/null +++ b/deploy/aws/tidb-cluster/pre_userdata @@ -0,0 +1,30 @@ +# According to https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html +# NVMe device names are in the format /dev/nvme[0-26]n1 +# and when the device is parted, it has /dev/nvme[0-26]n1p[1-xxx] +# if the disk is parted, we will ignore the entire device +# and if the disk is already formatted, then it will have a blkid, it should also be ignored +for i in `seq 0 26`; do + if [ -e "/dev/nvme${i}" ]; then # find the device + if [ -e "/dev/nvme${i}n1" ]; then + if ls /dev/nvme${i}n1p* > /dev/null 2>&1; then + echo "disk /dev/nvme${i}n1 already parted, skipping" + else + echo "disk /dev/nvme${i}n1 is not parted" + if ! blkid /dev/nvme${i}n1 > /dev/null; then + echo "/dev/nvme${i}n1 not formatted" + mkfs -t ext4 /dev/nvme${i}n1 + mkdir -p /mnt/local-ssd/ssd${i} + cat <> /etc/fstab +/dev/nvme${i}n1 /mnt/local-ssd/ssd${i} ext4 defaults,nofail,noatime,nodelalloc 0 2 +EOF + fi + fi + fi + fi +done + +# mount local ssd disks +mount -a + +# ZONE=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone) +# AWS_DEFAULT_REGION=$(echo $ZONE | awk '{print substr($0, 1, length($0)-1)}') \ No newline at end of file diff --git a/deploy/aws/tidb-cluster/values/default.yaml b/deploy/aws/tidb-cluster/values/default.yaml deleted file mode 100644 index e5fc1423b0..0000000000 --- a/deploy/aws/tidb-cluster/values/default.yaml +++ /dev/null @@ -1,9 +0,0 @@ -timezone: UTC - -pd: - logLevel: info -tikv: - logLevel: info - syncLog: true -tidb: - logLevel: info diff --git a/deploy/aws/tidb-cluster/variables.tf b/deploy/aws/tidb-cluster/variables.tf index c937c95341..bac21c8cf6 100644 --- a/deploy/aws/tidb-cluster/variables.tf +++ b/deploy/aws/tidb-cluster/variables.tf @@ -15,7 +15,7 @@ variable "worker_groups" { default = [ { - "name" = "default" + name = "default" }, ] } @@ -47,7 +47,7 @@ variable "worker_groups_launch_template" { default = [ { - "name" = "default" + name = "default" }, ] } @@ -84,95 +84,22 @@ variable "worker_additional_security_group_ids" { default = [] } -variable "worker_sg_ingress_from_port" { - description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)." - default = "1025" -} - -variable "workers_additional_policies" { - description = "Additional policies to be added to workers" - type = list(string) - default = [] -} - -variable "workers_additional_policies_count" { - default = 0 -} - -variable "kubeconfig_aws_authenticator_command" { - description = "Command to use to fetch AWS EKS credentials." - default = "aws-iam-authenticator" -} - -variable "kubeconfig_aws_authenticator_command_args" { - description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]." - type = list(string) - default = [] -} - -variable "kubeconfig_aws_authenticator_additional_args" { - description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]." 
- type = list(string) - default = [] -} - -variable "kubeconfig_aws_authenticator_env_variables" { - description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}." - type = map(string) - default = {} -} - -variable "cluster_create_timeout" { - description = "Timeout value when creating the EKS cluster." - default = "15m" -} - -variable "cluster_delete_timeout" { - description = "Timeout value when deleting the EKS cluster." - default = "15m" -} - variable "local_exec_interpreter" { description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows Git Bash is a good choice." type = list(string) default = ["/bin/sh", "-c"] } -variable "cluster_create_security_group" { - description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`." - default = true -} - -variable "worker_create_security_group" { - description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`." - default = true -} - -variable "permissions_boundary" { - description = "If provided, all IAM roles will be created with this permissions boundary attached." - default = "" -} - variable "iam_path" { description = "If provided, all IAM roles will be created on this path." default = "/" } -variable "cluster_endpoint_private_access" { - description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled." - default = false -} - -variable "cluster_endpoint_public_access" { - description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled." - default = true -} - -variable "operator_version" { - description = "tidb operator version" +variable "tidb_cluster_chart_version" { + description = "tidb-cluster chart version" default = "v1.0.0-beta.3" } @@ -225,14 +152,19 @@ variable "monitor_instance_type" { default = "c5d.large" } -variable "monitor_storage_size" { +variable "monitor_storage_class" { type = string - default = "100Gi" + default = "ebs-gp2" } -variable "monitor_enable_anonymous_user" { - type = bool - default = false +variable "pd_storage_class" { + type = string + default = "ebs-gp2" +} + +variable "tikv_storage_class" { + type = string + default = "local-storage" } variable "override_values" { diff --git a/deploy/aws/tidb-cluster/workers.tf b/deploy/aws/tidb-cluster/workers.tf index 59b8b24c15..21e44ffd77 100644 --- a/deploy/aws/tidb-cluster/workers.tf +++ b/deploy/aws/tidb-cluster/workers.tf @@ -44,23 +44,23 @@ resource "aws_autoscaling_group" "workers" { tags = concat( [ { - "key" = "Name" - "value" = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}-eks_asg" - "propagate_at_launch" = true + key = "Name" + value = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}-eks_asg" + propagate_at_launch = true }, { - "key" = "kubernetes.io/cluster/${var.eks_info.name}" - "value" = "owned" - "propagate_at_launch" = true + key = "kubernetes.io/cluster/${var.eks_info.name}" + value = "owned" + propagate_at_launch = true }, { - "key" = "k8s.io/cluster-autoscaler/${lookup( + key = "k8s.io/cluster-autoscaler/${lookup( local.tidb_cluster_worker_groups[count.index], "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"], ) == 1 ? 
"enabled" : "disabled"}" - "value" = "true" - "propagate_at_launch" = false + value = "true" + propagate_at_launch = false }, # { # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" @@ -68,13 +68,13 @@ resource "aws_autoscaling_group" "workers" { # "propagate_at_launch" = false # }, { - "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - "value" = "${lookup( + key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + value = "${lookup( local.tidb_cluster_worker_groups[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"], )}Gi" - "propagate_at_launch" = false + propagate_at_launch = false }, ], local.asg_tags, diff --git a/deploy/aws/tidb-cluster/workers_launch_template.tf b/deploy/aws/tidb-cluster/workers_launch_template.tf index cd5205f84a..2f17cede4d 100644 --- a/deploy/aws/tidb-cluster/workers_launch_template.tf +++ b/deploy/aws/tidb-cluster/workers_launch_template.tf @@ -148,27 +148,27 @@ resource "aws_autoscaling_group" "workers_launch_template" { tags = concat( [ { - "key" = "Name" - "value" = "${var.eks_info.name}-${lookup( + key = "Name" + value = "${var.eks_info.name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, )}-eks_asg" - "propagate_at_launch" = true + propagate_at_launch = true }, { - "key" = "kubernetes.io/cluster/${var.eks_info.name}" - "value" = "owned" - "propagate_at_launch" = true + key = "kubernetes.io/cluster/${var.eks_info.name}" + value = "owned" + propagate_at_launch = true }, { - "key" = "k8s.io/cluster-autoscaler/${lookup( + key = "k8s.io/cluster-autoscaler/${lookup( var.worker_groups_launch_template[count.index], "autoscaling_enabled", local.workers_group_launch_template_defaults["autoscaling_enabled"], ) == 1 ? 
"enabled" : "disabled"}" - "value" = "true" - "propagate_at_launch" = false + value = "true" + propagate_at_launch = false }, # { # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" @@ -176,13 +176,13 @@ resource "aws_autoscaling_group" "workers_launch_template" { # "propagate_at_launch" = false # }, { - "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - "value" = "${lookup( + key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" + value = "${lookup( var.worker_groups_launch_template[count.index], "root_volume_size", local.workers_group_launch_template_defaults["root_volume_size"], )}Gi" - "propagate_at_launch" = false + propagate_at_launch = false }, ], local.asg_tags, diff --git a/deploy/aws/tikv-userdata.sh b/deploy/aws/tikv-userdata.sh index d71c5b9512..3187a39fff 100644 --- a/deploy/aws/tikv-userdata.sh +++ b/deploy/aws/tikv-userdata.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # set system ulimits cat < /etc/security/limits.d/99-tidb.conf root soft nofile 1000000 diff --git a/deploy/aws/userdata.sh b/deploy/aws/userdata.sh index 123ba40add..9c9cc27be5 100644 --- a/deploy/aws/userdata.sh +++ b/deploy/aws/userdata.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # set ulimits cat < /etc/security/limits.d/99-tidb.conf root soft nofile 1000000 diff --git a/deploy/aws/values/default.yaml b/deploy/aws/values/default.yaml new file mode 100644 index 0000000000..ec3b964122 --- /dev/null +++ b/deploy/aws/values/default.yaml @@ -0,0 +1,24 @@ +# Basic customization for tidb-cluster chart that suits AWS environment +# It is recommended to make a copy of this file and customize for each of your TiDB cluster +timezone: UTC + +pd: + logLevel: info +tikv: + logLevel: info + syncLog: true +tidb: + logLevel: info + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: '0.0.0.0/0' + service.beta.kubernetes.io/aws-load-balancer-type: nlb + +monitor: + storage: 100Gi + grafana: + config: + GF_AUTH_ANONYMOUS_ENABLED: "true" + service: + type: LoadBalancer \ No newline at end of file diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index 19ef591f4f..a6bc447687 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -1,5 +1,5 @@ variable "region" { - description = "aws region" + description = "AWS region" # supported regions: # US: us-east-1, us-east-2, us-west-2 # Asia Pacific: ap-south-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1 @@ -7,53 +7,105 @@ variable "region" { default = "us-west-2" } +variable "eks_name" { + description = "Name of the EKS cluster. Also used as a prefix in names of related resources." + default = "my-cluster" +} + +variable "eks_version" { + description = "Kubernetes version to use for the EKS cluster." + default = "1.12" +} + +variable "operator_version" { + description = "TiDB operator version" + default = "v1.0.0-beta.3" +} + # Please note that this is only for manually created VPCs, deploying multiple EKS # clusters in one VPC is NOT supported now. 
variable "create_vpc" { - description = "Create a new VPC or not, if true the vpc_cidr/private_subnets/public_subnets must be set correctly, otherwise vpc_id/subnet_ids must be set correctly" + description = "Create a new VPC or not, if true the vpc_id/subnet_ids must be set correctly, otherwise the vpc_cidr/private_subnets/public_subnets must be set correctly" default = true } variable "vpc_cidr" { - description = "vpc cidr" + description = "VPC cidr, must be set correctly if create_vpc is true" default = "10.0.0.0/16" } variable "private_subnets" { - description = "vpc private subnets" + description = "VPC private subnets, must be set correctly if create_vpc is true" type = list(string) default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] } variable "public_subnets" { - description = "vpc public subnets" + description = "VPC public subnets, must be set correctly if create_vpc is true" type = list(string) default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] } variable "vpc_id" { - description = "VPC id" + description = "VPC id, must be set correctly if create_vpc is false" type = string - default = "vpc-c679deae" + default = "" } variable "subnets" { - description = "subnet id list" + description = "subnet id list, must be set correctly if create_vpc is false" type = list(string) - default = ["subnet-899e79f3", "subnet-a72d80cf", "subnet-a76d34ea"] + default = [] } -variable "eks_name" { - description = "Name of the EKS cluster. Also used as a prefix in names of related resources." - default = "my-cluster" +variable "bastion_ingress_cidr" { + description = "IP cidr that allowed to access bastion ec2 instance" + default = ["0.0.0.0/0"] # Note: Please restrict your ingress to only necessary IPs. Opening to 0.0.0.0/0 can lead to security vulnerabilities. } -variable "eks_version" { - description = "Kubernetes version to use for the EKS cluster." 
- default = "1.12" +variable "create_bastion" { + description = "Create bastion ec2 instance to access TiDB cluster" + default = true } -variable "operator_version" { - description = "tidb operator version" - default = "v1.0.0-beta.3" +variable "bastion_instance_type" { + description = "bastion ec2 instance type" + default = "t2.micro" +} + +# For aws tutorials compatiablity +variable "default_cluster_version" { + default = "3.0.0" +} + +variable "default_cluster_pd_count" { + default = 3 +} + +variable "default_cluster_tikv_count" { + default = 3 +} + +variable "default_cluster_tidb_count" { + default = 2 +} + +variable "default_cluster_pd_instance_type" { + default = "m5.xlarge" +} + +variable "default_cluster_tikv_instance_type" { + default = "c5d.4xlarge" +} + +variable "default_cluster_tidb_instance_type" { + default = "c5.4xlarge" +} + +variable "default_cluster_monitor_instance_type" { + default = "c5.2xlarge" +} + +variable "default_cluster_name" { + default = "my-cluster" } From fc2e26250253c88f486c5cf5b36676920f9d8d48 Mon Sep 17 00:00:00 2001 From: Aylei Date: Sat, 29 Jun 2019 22:57:45 +0800 Subject: [PATCH 03/11] Remove commented codes Signed-off-by: Aylei --- deploy/aws/eks/workers.tf | 27 ----------- deploy/aws/eks/workers_launch_template.tf | 31 ------------- deploy/aws/tidb-cluster/workers.tf | 11 ----- .../tidb-cluster/workers_launch_template.tf | 45 +------------------ 4 files changed, 1 insertion(+), 113 deletions(-) diff --git a/deploy/aws/eks/workers.tf b/deploy/aws/eks/workers.tf index d5d7c52573..47d60f7a75 100644 --- a/deploy/aws/eks/workers.tf +++ b/deploy/aws/eks/workers.tf @@ -22,15 +22,6 @@ resource "aws_autoscaling_group" "workers" { "asg_force_delete", local.workers_group_defaults["asg_force_delete"], ) - # target_group_arns = compact( - # split( - # ",", - # coalesce( - # lookup(local.control_worker_groups[count.index], "target_group_arns", ""), - # local.workers_group_defaults["target_group_arns"], - # ), - # ), - # ) launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) vpc_zone_identifier = split( ",", @@ -44,24 +35,6 @@ resource "aws_autoscaling_group" "workers" { "protect_from_scale_in", local.workers_group_defaults["protect_from_scale_in"], ) - # suspended_processes = compact( - # split( - # ",", - # coalesce( - # lookup(local.control_worker_groups[count.index], "suspended_processes", ""), - # local.workers_group_defaults["suspended_processes"], - # ), - # ), - # ) - # enabled_metrics = compact( - # split( - # ",", - # coalesce( - # lookup(local.control_worker_groups[count.index], "enabled_metrics", ""), - # local.workers_group_defaults["enabled_metrics"], - # ), - # ), - # ) count = var.worker_group_count placement_group = lookup( local.control_worker_groups[count.index], diff --git a/deploy/aws/eks/workers_launch_template.tf b/deploy/aws/eks/workers_launch_template.tf index 5d99dd2d0f..8a84575c6f 100644 --- a/deploy/aws/eks/workers_launch_template.tf +++ b/deploy/aws/eks/workers_launch_template.tf @@ -104,32 +104,6 @@ resource "aws_autoscaling_group" "workers_launch_template" { "protect_from_scale_in", local.workers_group_launch_template_defaults["protect_from_scale_in"], ) - # suspended_processes = compact( - # split( - # ",", - # coalesce( - # lookup( - # var.worker_groups_launch_template[count.index], - # "suspended_processes", - # "", - # ), - # local.workers_group_launch_template_defaults["suspended_processes"], - # ), - # ), - # ) - # enabled_metrics = compact( - # split( - # ",", - # coalesce( - # lookup( - # 
var.worker_groups_launch_template[count.index], - # "enabled_metrics", - # "", - # ), - # local.workers_group_launch_template_defaults["enabled_metrics"], - # ), - # ), - # ) count = var.worker_group_launch_template_count tags = concat( @@ -157,11 +131,6 @@ resource "aws_autoscaling_group" "workers_launch_template" { value = "true" propagate_at_launch = false }, - # { - # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" - # "value" = "" - # "propagate_at_launch" = false - # }, { key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" value = "${lookup( diff --git a/deploy/aws/tidb-cluster/workers.tf b/deploy/aws/tidb-cluster/workers.tf index 21e44ffd77..4f00c4ea86 100644 --- a/deploy/aws/tidb-cluster/workers.tf +++ b/deploy/aws/tidb-cluster/workers.tf @@ -18,15 +18,6 @@ resource "aws_autoscaling_group" "workers" { local.workers_group_defaults["asg_min_size"], ) force_delete = false - # target_group_arns = compact( - # split( - # ",", - # coalesce( - # lookup(local.tidb_cluster_worker_groups[count.index], "target_group_arns", ""), - # local.workers_group_defaults["target_group_arns"], - # ), - # ), - # ) launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) vpc_zone_identifier = split( ",", @@ -36,8 +27,6 @@ resource "aws_autoscaling_group" "workers" { ), ) protect_from_scale_in = false - # suspended_processes = "" # A comma delimited string of processes to to suspend. i.e. AZRebalance,HealthCheck,ReplaceUnhealthy - # enabled_metrics = "" # A comma delimited list of metrics to be collected i.e. GroupMinSize,GroupMaxSize,GroupDesiredCapacity count = local.worker_group_count placement_group = "" # The name of the placement group into which to launch the instances, if any. diff --git a/deploy/aws/tidb-cluster/workers_launch_template.tf b/deploy/aws/tidb-cluster/workers_launch_template.tf index 2f17cede4d..d93554e849 100644 --- a/deploy/aws/tidb-cluster/workers_launch_template.tf +++ b/deploy/aws/tidb-cluster/workers_launch_template.tf @@ -26,19 +26,6 @@ resource "aws_autoscaling_group" "workers_launch_template" { "asg_force_delete", local.workers_group_launch_template_defaults["asg_force_delete"], ) - # target_group_arns = compact( - # split( - # ",", - # coalesce( - # lookup( - # var.worker_groups_launch_template[count.index], - # "target_group_arns", - # "", - # ), - # local.workers_group_launch_template_defaults["target_group_arns"], - # ), - # ), - # ) mixed_instances_policy { instances_distribution { @@ -117,32 +104,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { "protect_from_scale_in", local.workers_group_launch_template_defaults["protect_from_scale_in"], ) - # suspended_processes = compact( - # split( - # ",", - # coalesce( - # lookup( - # var.worker_groups_launch_template[count.index], - # "suspended_processes", - # "", - # ), - # local.workers_group_launch_template_defaults["suspended_processes"], - # ), - # ), - # ) - # enabled_metrics = compact( - # split( - # ",", - # coalesce( - # lookup( - # var.worker_groups_launch_template[count.index], - # "enabled_metrics", - # "", - # ), - # local.workers_group_launch_template_defaults["enabled_metrics"], - # ), - # ), - # ) + count = var.worker_group_launch_template_count tags = concat( @@ -170,11 +132,6 @@ resource "aws_autoscaling_group" "workers_launch_template" { value = "true" propagate_at_launch = false }, - # { - # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" - # "value" = "" - # "propagate_at_launch" = false - # }, { key = 
"k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" value = "${lookup( From 49e22c1e95f56e034b25f4a09dba6b1005cb8395 Mon Sep 17 00:00:00 2001 From: Aylei Date: Sun, 30 Jun 2019 17:33:29 +0800 Subject: [PATCH 04/11] Refine README me of terraform for eks Signed-off-by: Aylei --- deploy/aws/README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy/aws/README.md b/deploy/aws/README.md index b28a8afe8b..7fd61f4275 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -161,13 +161,11 @@ Currently, the instance type of TiDB cluster component is not configurable becau ### Customize TiDB parameters -By default, the terraform script will pass `./values/default.yaml` to the tidb-cluster helm chart. You change the `overrides_values` of the tidb cluster module to specify a customized values file. - -The reference of the values file can be found [here]() +By default, the terraform script will pass `./values/default.yaml` to the tidb-cluster helm chart. You can change the `overrides_values` of the tidb cluster module to specify a customized values file. ## Multiple Cluster Management -An instance of `./tidb-cluster` module correspond to a TiDB cluster in the EKS cluster. If you want to add a new TiDB cluster, you can edit `./cluster.tf` and add a new instance of `./tidb-cluster` module: +An instance of `./tidb-cluster` module corresponds to a TiDB cluster in the EKS cluster. If you want to add a new TiDB cluster, you can edit `./cluster.tf` and add a new instance of `./tidb-cluster` module: ```hcl module example-cluster { From 639fcaf755e3dfe81d1ef15d503831b6b7a6d127 Mon Sep 17 00:00:00 2001 From: Aylei Date: Tue, 2 Jul 2019 14:05:36 +0800 Subject: [PATCH 05/11] Update deploy/aws/README.md Co-Authored-By: Greg Weber --- deploy/aws/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 7fd61f4275..4570a5aabe 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -251,7 +251,7 @@ $ terraform destroy > **Note:** > -> If you specify service type `LoadBalancer` for the services like the default configuration do, you have to delete these services before destroy, otherwise they will block the subnets to be destroyed. +> If you specify service type `LoadBalancer` for the services like the default configuration do, you have to delete these services before destroy, otherwise they will block the subnets from being destroyed. 
> **Note:** > From 6a2ce1b0e66f65329873ed13cb0544454cba48fa Mon Sep 17 00:00:00 2001 From: Aylei Date: Tue, 2 Jul 2019 15:37:35 +0800 Subject: [PATCH 06/11] Address review comments Signed-off-by: Aylei --- deploy/aws/clusters.tf | 2 +- deploy/aws/eks/initialize.tf | 4 +- deploy/aws/eks/local.tf | 1 + deploy/aws/eks/manifests/crd.yaml | 103 ------------------ .../manifests/local-volume-provisioner.yaml | 6 - deploy/aws/eks/manifests/tiller-rbac.yaml | 18 --- deploy/aws/eks/outputs.tf | 5 + deploy/aws/tidb-cluster/pre_userdata | 31 +++--- 8 files changed, 24 insertions(+), 146 deletions(-) delete mode 100644 deploy/aws/eks/manifests/crd.yaml delete mode 100644 deploy/aws/eks/manifests/tiller-rbac.yaml diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf index 50fda10c27..ed07e08165 100644 --- a/deploy/aws/clusters.tf +++ b/deploy/aws/clusters.tf @@ -22,7 +22,7 @@ module "default-cluster" { source = "./tidb-cluster" - eks_info = local.default_eks + eks_info = module.eks subnets = local.default_subnets cluster_name = var.default_cluster_name diff --git a/deploy/aws/eks/initialize.tf b/deploy/aws/eks/initialize.tf index d6c76b8053..bd186df6fe 100644 --- a/deploy/aws/eks/initialize.tf +++ b/deploy/aws/eks/initialize.tf @@ -16,10 +16,10 @@ for i in `seq 1 10`; do echo "${null_resource.update_aws_auth_and_install_operator[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && break || sleep 10 done -kubectl apply -f manifests/crd.yaml +kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/crd.yaml +kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/tiller-rbac.yaml kubectl apply -f manifests/local-volume-provisioner.yaml kubectl apply -f manifests/gp2-storageclass.yaml -kubectl apply -f manifests/tiller-rbac.yaml helm init --service-account tiller --upgrade --wait until helm ls; do echo "Wait tiller ready" diff --git a/deploy/aws/eks/local.tf b/deploy/aws/eks/local.tf index ab85bf732c..26d8595d4b 100644 --- a/deploy/aws/eks/local.tf +++ b/deploy/aws/eks/local.tf @@ -116,6 +116,7 @@ locals { vpc_id = var.vpc_id } + # Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml ebs_optimized = { "c1.medium" = false "c1.xlarge" = true diff --git a/deploy/aws/eks/manifests/crd.yaml b/deploy/aws/eks/manifests/crd.yaml deleted file mode 100644 index 789dde4285..0000000000 --- a/deploy/aws/eks/manifests/crd.yaml +++ /dev/null @@ -1,103 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - # name must match the spec fields below, and be in the form: . - name: tidbclusters.pingcap.com -spec: - # group name to use for REST API: /apis// - group: pingcap.com - # list of versions supported by this CustomResourceDefinition - version: v1alpha1 - # either Namespaced or Cluster - scope: Namespaced - names: - # plural name to be used in the URL: /apis/// - plural: tidbclusters - # singular name to be used as an alias on the CLI and for display - singular: tidbcluster - # kind is normally the CamelCased singular type. Your resource manifests use this. 
- kind: TidbCluster - # shortNames allow shorter string to match your resource on the CLI - shortNames: - - tc - additionalPrinterColumns: - - name: PD - type: string - description: The image for PD cluster - JSONPath: .spec.pd.image - - name: Storage - type: string - description: The storage size specified for PD node - JSONPath: .spec.pd.requests.storage - - name: Ready - type: integer - description: The ready replicas number of PD cluster - JSONPath: .status.pd.statefulSet.readyReplicas - - name: Desire - type: integer - description: The desired replicas number of PD cluster - JSONPath: .spec.pd.replicas - - name: TiKV - type: string - description: The image for TiKV cluster - JSONPath: .spec.tikv.image - - name: Storage - type: string - description: The storage size specified for TiKV node - JSONPath: .spec.tikv.requests.storage - - name: Ready - type: integer - description: The ready replicas number of TiKV cluster - JSONPath: .status.tikv.statefulSet.readyReplicas - - name: Desire - type: integer - description: The desired replicas number of TiKV cluster - JSONPath: .spec.tikv.replicas - - name: TiDB - type: string - description: The image for TiDB cluster - JSONPath: .spec.tidb.image - - name: Ready - type: integer - description: The ready replicas number of TiDB cluster - JSONPath: .status.tidb.statefulSet.readyReplicas - - name: Desire - type: integer - description: The desired replicas number of TiDB cluster - JSONPath: .spec.tidb.replicas - validation: - # openAPIV3Schema is the schema for validating custom objects. - openAPIV3Schema: - properties: - spec: - properties: - pd: - properties: - limits: - properties: - cpu: - type: string - requests: - properties: - cpu: - type: string - tikv: - properties: - limits: - properties: - cpu: - type: string - requests: - properties: - cpu: - type: string - tidb: - properties: - limits: - properties: - cpu: - type: string - requests: - properties: - cpu: - type: string diff --git a/deploy/aws/eks/manifests/local-volume-provisioner.yaml b/deploy/aws/eks/manifests/local-volume-provisioner.yaml index f0f11e772a..941babe068 100644 --- a/deploy/aws/eks/manifests/local-volume-provisioner.yaml +++ b/deploy/aws/eks/manifests/local-volume-provisioner.yaml @@ -66,9 +66,6 @@ spec: - mountPath: /etc/provisioner/config name: provisioner-config readOnly: true - # mounting /dev in DinD environment would fail - # - mountPath: /dev - # name: provisioner-dev - mountPath: /mnt/local-ssd name: local-disks mountPropagation: "HostToContainer" @@ -76,9 +73,6 @@ spec: - name: provisioner-config configMap: name: local-provisioner-config - # - name: provisioner-dev - # hostPath: - # path: /dev - name: local-disks hostPath: path: /mnt/local-ssd diff --git a/deploy/aws/eks/manifests/tiller-rbac.yaml b/deploy/aws/eks/manifests/tiller-rbac.yaml deleted file mode 100644 index d3ed1ad46e..0000000000 --- a/deploy/aws/eks/manifests/tiller-rbac.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tiller - namespace: kube-system ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: tiller-clusterrolebinding -subjects: -- kind: ServiceAccount - name: tiller - namespace: kube-system -roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: "" diff --git a/deploy/aws/eks/outputs.tf b/deploy/aws/eks/outputs.tf index 25d655cf26..6cf043f4b4 100644 --- a/deploy/aws/eks/outputs.tf +++ b/deploy/aws/eks/outputs.tf @@ -54,6 +54,11 @@ output "kubeconfig_filename" { value = 
element(concat(local_file.kubeconfig.*.filename, [""]), 0) } +output "kubeconfig_file" { + description = "The filename of the generated kubectl config." + value = element(concat(local_file.kubeconfig.*.filename, [""]), 0) +} + output "workers_asg_arns" { description = "IDs of the autoscaling groups containing workers." value = concat( diff --git a/deploy/aws/tidb-cluster/pre_userdata b/deploy/aws/tidb-cluster/pre_userdata index b601a7fa99..49e51cd9fe 100644 --- a/deploy/aws/tidb-cluster/pre_userdata +++ b/deploy/aws/tidb-cluster/pre_userdata @@ -4,23 +4,22 @@ # if the disk is parted, we will ignore the entire device # and if the disk is already formatted, then it will have a blkid, it should also be ignored for i in `seq 0 26`; do - if [ -e "/dev/nvme${i}" ]; then # find the device - if [ -e "/dev/nvme${i}n1" ]; then - if ls /dev/nvme${i}n1p* > /dev/null 2>&1; then - echo "disk /dev/nvme${i}n1 already parted, skipping" - else - echo "disk /dev/nvme${i}n1 is not parted" - if ! blkid /dev/nvme${i}n1 > /dev/null; then - echo "/dev/nvme${i}n1 not formatted" - mkfs -t ext4 /dev/nvme${i}n1 - mkdir -p /mnt/local-ssd/ssd${i} - cat <> /etc/fstab -/dev/nvme${i}n1 /mnt/local-ssd/ssd${i} ext4 defaults,nofail,noatime,nodelalloc 0 2 -EOF - fi - fi - fi + if [ -e "/dev/nvme${i}" ]; then # find the device + if [ -e "/dev/nvme${i}n1" ]; then + if ls /dev/nvme${i}n1p* > /dev/null 2>&1; then + echo "disk /dev/nvme${i}n1 already parted, skipping" + else + echo "disk /dev/nvme${i}n1 is not parted" + if ! blkid /dev/nvme${i}n1 > /dev/null; then + echo "/dev/nvme${i}n1 not formatted" + mkfs -t ext4 /dev/nvme${i}n1 + DISK_UUID=$(blkid -s UUID -o value /dev/nvme${i}n1) + mkdir -p /mnt/local-ssd/$DISK_UUID + echo UUID=`blkid -s UUID -o value /dev/nvme${i}n1` /mnt/local-ssd/$DISK_UUID ext4 defaults 0 2 | tee -a /etc/fstab + fi + fi fi + fi done # mount local ssd disks From 7ae6e0d4a4c16e9c5b1019e561d44a96620faf3a Mon Sep 17 00:00:00 2001 From: Aylei Date: Tue, 2 Jul 2019 22:57:55 +0800 Subject: [PATCH 07/11] Refactor AWS terraform scripts, remove the customized eks module Signed-off-by: Aylei --- deploy/aws/README.md | 40 +- deploy/aws/aws-tutorial.tfvars | 6 +- deploy/aws/clusters.tf | 13 +- deploy/aws/default-cluster.yaml | 1 + deploy/aws/eks/cluster.tf | 83 ---- deploy/aws/eks/data.tf | 154 -------- deploy/aws/eks/initialize.tf | 126 ------- deploy/aws/eks/kubectl.tf | 6 - deploy/aws/eks/local.tf | 246 ------------ deploy/aws/eks/outputs.tf | 103 ----- .../config-map-aws-auth-map_accounts.yaml.tpl | 1 - .../config-map-aws-auth-map_roles.yaml.tpl | 4 - .../config-map-aws-auth-map_users.yaml.tpl | 4 - .../templates/config-map-aws-auth.yaml.tpl | 13 - deploy/aws/eks/templates/kubeconfig.tpl | 28 -- deploy/aws/eks/templates/userdata.sh.tpl | 10 - deploy/aws/eks/templates/worker-role.tpl | 5 - deploy/aws/eks/variables.tf | 262 ------------- deploy/aws/eks/workers.tf | 354 ------------------ deploy/aws/eks/workers_launch_template.tf | 298 --------------- deploy/aws/local.tf | 5 - deploy/aws/main.tf | 29 +- deploy/aws/outputs.tf | 4 +- deploy/aws/pd-userdata.sh | 27 -- deploy/aws/tidb-cluster/README.md | 7 + deploy/aws/tidb-cluster/cluster.tf | 211 +++++++---- deploy/aws/tidb-cluster/data.tf | 26 +- deploy/aws/tidb-cluster/local.tf | 12 +- .../{ => tidb-cluster}/values/default.yaml | 5 +- deploy/aws/tidb-cluster/variables.tf | 40 +- deploy/aws/tidb-cluster/workers.tf | 29 +- .../tidb-cluster/workers_launch_template.tf | 16 +- deploy/aws/tidb-operator/README.md | 7 + deploy/aws/tidb-operator/main.tf | 86 +++++ 
.../manifests/gp2-storageclass.yaml | 0 .../manifests/local-volume-provisioner.yaml | 0 deploy/aws/tidb-operator/outputs.tf | 3 + deploy/aws/tidb-operator/variables.tf | 53 +++ deploy/aws/tikv-userdata.sh | 27 -- deploy/aws/userdata.sh | 37 -- deploy/aws/variables.tf | 12 +- 41 files changed, 394 insertions(+), 1999 deletions(-) create mode 100644 deploy/aws/default-cluster.yaml delete mode 100644 deploy/aws/eks/cluster.tf delete mode 100644 deploy/aws/eks/data.tf delete mode 100644 deploy/aws/eks/initialize.tf delete mode 100644 deploy/aws/eks/kubectl.tf delete mode 100644 deploy/aws/eks/local.tf delete mode 100644 deploy/aws/eks/outputs.tf delete mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl delete mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl delete mode 100644 deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl delete mode 100644 deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl delete mode 100644 deploy/aws/eks/templates/kubeconfig.tpl delete mode 100644 deploy/aws/eks/templates/userdata.sh.tpl delete mode 100644 deploy/aws/eks/templates/worker-role.tpl delete mode 100644 deploy/aws/eks/variables.tf delete mode 100644 deploy/aws/eks/workers.tf delete mode 100644 deploy/aws/eks/workers_launch_template.tf delete mode 100644 deploy/aws/local.tf delete mode 100644 deploy/aws/pd-userdata.sh create mode 100644 deploy/aws/tidb-cluster/README.md rename deploy/aws/{ => tidb-cluster}/values/default.yaml (81%) create mode 100644 deploy/aws/tidb-operator/README.md create mode 100644 deploy/aws/tidb-operator/main.tf rename deploy/aws/{eks => tidb-operator}/manifests/gp2-storageclass.yaml (100%) rename deploy/aws/{eks => tidb-operator}/manifests/local-volume-provisioner.yaml (100%) create mode 100644 deploy/aws/tidb-operator/outputs.tf create mode 100644 deploy/aws/tidb-operator/variables.tf delete mode 100644 deploy/aws/tikv-userdata.sh delete mode 100644 deploy/aws/userdata.sh diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 4570a5aabe..87357b294a 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -159,9 +159,24 @@ The TiDB version and component count are also configurable in variables.tf, you Currently, the instance type of TiDB cluster component is not configurable because PD and TiKV relies on [NVMe SSD instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html), different instance types have different disks. -### Customize TiDB parameters +### Customize TiDB Cluster -By default, the terraform script will pass `./values/default.yaml` to the tidb-cluster helm chart. You can change the `overrides_values` of the tidb cluster module to specify a customized values file. +The values file ([`./tidb-cluster/values/default.yaml`](./tidb-cluster/values/default.yaml)) provides proper defaults for TiDB clusters in EKS. You can specify an overriding values file in [`clusters.tf`](./clusters.tf) for each TiDB cluster. Values in this file will override the default ones. + +For example, the default cluster specifies `./default-cluster.yaml` as the overriding values file, and enables the ConfigMap rollout feature in this file. + +In EKS, some values are not customizable as usual, including the cluster version, replicas, node selectors and taints. These variables are controlled by terraform instead, for consistency. To customize these variables, you can edit [`clusters.tf`](./clusters.tf) and change the variables of each `./tidb-cluster` module directly.
+ +### Customize TiDB Operator + +You can customize the TiDB operator by specifying a helm values file through the `operator_values` variable. For example: + +```hcl +variable "operator_values" { + description = "The helm values of TiDB Operator" + default = file("operator_values.yaml") +} +``` ## Multiple Cluster Management @@ -175,12 +190,11 @@ module example-cluster { eks_info = local.default_eks # The subnets of node pools of this TiDB cluster, required subnets = local.default_subnets - # TiDB cluster name, required cluster_name = "example-cluster" - # Helm values file, required - override_values = "values/example-cluster.yaml" + # Helm values file + override_values = file("example-cluster.yaml") # TiDB cluster version cluster_version = "v3.0.0" # SSH key of cluster nodes @@ -189,15 +203,11 @@ module example-cluster { pd_count = 3 # PD instance type pd_instance_type = "t2.xlarge" - # The storage class used by PD - pd_storage_class = "ebs-gp2" # TiKV replica number tikv_count = 3 # TiKV instance type tikv_instance_type = "t2.xlarge" # The storage class used by TiKV, if the TiKV instance type does not have local SSD, you should change it to a storage class - # of cloud disks like 'ebs-gp2'. Note that TiKV without local storage is strongly discouraged in production environments. - tikv_storage_class = "local-storage" # TiDB replica number tidb_count = 2 # TiDB instance type @@ -212,7 +222,7 @@ module other-cluster { source = "./tidb-cluster" cluster_name = "other-cluster" - override_values = "values/other-cluster.yaml" + override_values = file("other-cluster.yaml") #...... } ``` @@ -223,17 +233,15 @@ module other-cluster { You can refer to [./tidb-cluster/variables.tf](./tidb-cluster/variables.tf) for the complete configuration reference of the `./tidb-cluster` module. -It is recommended to provide a dedicated values file for each TiDB cluster for ease of management. You can copy the `values/default.yaml` to get a reasonable default. - You can get the DNS names of the TiDB service and Grafana service via kubectl. If you want terraform to print this information as it does for the `default-cluster`, you can add `output` sections in `outputs.tf`: ```hcl output "example-cluster_tidb-dns" { - value = module.example-cluster.tidb_dns + value = module.example-cluster.tidb_dns } output "example-cluster_monitor-dns" { - value = module.example-cluster.monitor_dns + value = module.example-cluster.monitor_dns } ``` @@ -249,10 +257,6 @@ $ terraform destroy > > This will destroy your EKS cluster along with all the TiDB clusters you deployed on it. -> **Note:** -> -> If you specify service type `LoadBalancer` for the services like the default configuration do, you have to delete these services before destroy, otherwise they will block the subnets from being destroyed. - > **Note:** > > You have to manually delete the EBS volumes in the AWS console after running `terraform destroy` if you do not need the data on the volumes anymore.
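The leftover EBS volumes mentioned in the last note can also be located with the AWS CLI instead of the console. This is only a sketch, under the assumption that the volumes were dynamically provisioned by the in-tree EBS provisioner and therefore carry `kubernetes.io/created-for/*` tags; always review the list before deleting anything, because deleting a volume destroys its data permanently:

```bash
# List detached volumes (status=available) that were created for Kubernetes PVCs.
aws ec2 describe-volumes \
  --region us-west-2 \
  --filters Name=status,Values=available \
            Name=tag-key,Values=kubernetes.io/created-for/pvc/name \
  --query 'Volumes[].{Id:VolumeId,Size:Size}' --output table

# After reviewing the output, delete a volume explicitly by ID (placeholder shown).
aws ec2 delete-volume --region us-west-2 --volume-id vol-0123456789abcdef0
```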
diff --git a/deploy/aws/aws-tutorial.tfvars b/deploy/aws/aws-tutorial.tfvars index d855acf5f2..6326ec3e03 100644 --- a/deploy/aws/aws-tutorial.tfvars +++ b/deploy/aws/aws-tutorial.tfvars @@ -1,9 +1,9 @@ -default_cluster_pd_instance_type = "c5d.large" +default_cluster_pd_instance_type = "c5d.large" default_cluster_pd_tikv_instance_type = "c5d.large" -default_cluster_tidb_instance_type = "c4.large" +default_cluster_tidb_instance_type = "c4.large" default_cluster_monitor_instance_type = "c5.large" -default_cluster_pd_count = 1 +default_cluster_pd_count = 1 default_cluster_tikv_count = 1 default_cluster_tidb_count = 1 diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf index ed07e08165..3049468699 100644 --- a/deploy/aws/clusters.tf +++ b/deploy/aws/clusters.tf @@ -16,14 +16,13 @@ # tidb_instance_type = "t2.xlarge" # monitor_instance_type = "t2.xlarge" # # yaml file that passed to helm to customize the release -# override_values = "values/default.yaml" +# override_values = file("values/example.yaml") #} - module "default-cluster" { - source = "./tidb-cluster" - eks_info = module.eks - subnets = local.default_subnets + source = "./tidb-cluster" + eks = local.default_eks + subnets = local.default_subnets cluster_name = var.default_cluster_name cluster_version = var.default_cluster_version @@ -31,9 +30,9 @@ module "default-cluster" { pd_count = var.default_cluster_pd_count pd_instance_type = var.default_cluster_pd_instance_type tikv_count = var.default_cluster_tikv_count - tikv_instance_type = var.default_cluster_tidb_instance_type + tikv_instance_type = var.default_cluster_tikv_instance_type tidb_count = var.default_cluster_tidb_count tidb_instance_type = var.default_cluster_tidb_instance_type monitor_instance_type = var.default_cluster_monitor_instance_type - override_values = "values/default.yaml" + override_values = file("default-cluster.yaml") } diff --git a/deploy/aws/default-cluster.yaml b/deploy/aws/default-cluster.yaml new file mode 100644 index 0000000000..00f5302c63 --- /dev/null +++ b/deploy/aws/default-cluster.yaml @@ -0,0 +1 @@ +enableConfigMapRollout: true \ No newline at end of file diff --git a/deploy/aws/eks/cluster.tf b/deploy/aws/eks/cluster.tf deleted file mode 100644 index 43d4abeb84..0000000000 --- a/deploy/aws/eks/cluster.tf +++ /dev/null @@ -1,83 +0,0 @@ -resource "aws_eks_cluster" "this" { - name = var.cluster_name - role_arn = aws_iam_role.cluster.arn - version = var.cluster_version - - vpc_config { - # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to - # force an interpolation expression to be interpreted as a list by wrapping it - # in an extra set of list brackets. That form was supported for compatibilty in - # v0.11, but is no longer supported in Terraform v0.12. - # - # If the expression in the following list itself returns a list, remove the - # brackets to avoid interpretation as a list of lists. If the expression - # returns a single list item then leave it as-is and remove this TODO comment. 
- security_group_ids = [local.cluster_security_group_id] - subnet_ids = var.subnets - endpoint_private_access = var.cluster_endpoint_private_access - endpoint_public_access = var.cluster_endpoint_public_access - } - - timeouts { - create = var.cluster_create_timeout - delete = var.cluster_delete_timeout - } - - depends_on = [ - aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy, - aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy, - ] -} - -resource "aws_security_group" "cluster" { - name_prefix = var.cluster_name - description = "EKS cluster security group." - vpc_id = var.vpc_id - tags = merge( - var.tags, - { - Name = "${var.cluster_name}-eks_cluster_sg" - }, - ) - count = var.cluster_create_security_group ? 1 : 0 -} - -resource "aws_security_group_rule" "cluster_egress_internet" { - description = "Allow cluster egress access to the Internet." - protocol = "-1" - security_group_id = aws_security_group.cluster[0].id - cidr_blocks = ["0.0.0.0/0"] - from_port = 0 - to_port = 0 - type = "egress" - count = var.cluster_create_security_group ? 1 : 0 -} - -resource "aws_security_group_rule" "cluster_https_worker_ingress" { - description = "Allow pods to communicate with the EKS cluster API." - protocol = "tcp" - security_group_id = aws_security_group.cluster[0].id - source_security_group_id = local.worker_security_group_id - from_port = 443 - to_port = 443 - type = "ingress" - count = var.cluster_create_security_group ? 1 : 0 -} - -resource "aws_iam_role" "cluster" { - name_prefix = var.cluster_name - assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json - permissions_boundary = var.permissions_boundary - path = var.iam_path - force_detach_policies = true -} - -resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" - role = aws_iam_role.cluster.name -} - -resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" - role = aws_iam_role.cluster.name -} diff --git a/deploy/aws/eks/data.tf b/deploy/aws/eks/data.tf deleted file mode 100644 index f46b0e3192..0000000000 --- a/deploy/aws/eks/data.tf +++ /dev/null @@ -1,154 +0,0 @@ -data "aws_region" "current" { -} - -data "aws_iam_policy_document" "workers_assume_role_policy" { - statement { - sid = "EKSWorkerAssumeRole" - - actions = [ - "sts:AssumeRole", - ] - - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -data "aws_ami" "eks_worker" { - filter { - name = "name" - values = ["amazon-eks-node-${var.cluster_version}-${var.worker_ami_name_filter}"] - } - - most_recent = true - - # Owner ID of AWS EKS team - owners = ["602401143452"] -} - -data "aws_iam_policy_document" "cluster_assume_role_policy" { - statement { - sid = "EKSClusterAssumeRole" - - actions = [ - "sts:AssumeRole", - ] - - principals { - type = "Service" - identifiers = ["eks.amazonaws.com"] - } - } -} - -data "template_file" "kubeconfig" { - template = file("${path.module}/templates/kubeconfig.tpl") - - vars = { - kubeconfig_name = local.kubeconfig_name - endpoint = aws_eks_cluster.this.endpoint - region = data.aws_region.current.name - cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data - aws_authenticator_command = var.kubeconfig_aws_authenticator_command - aws_authenticator_command_args = length(var.kubeconfig_aws_authenticator_command_args) > 0 ? 
" - ${join( - "\n - ", - var.kubeconfig_aws_authenticator_command_args, - )}" : " - ${join( - "\n - ", - formatlist("\"%s\"", ["token", "-i", aws_eks_cluster.this.name]), - )}" - aws_authenticator_additional_args = length(var.kubeconfig_aws_authenticator_additional_args) > 0 ? " - ${join( - "\n - ", - var.kubeconfig_aws_authenticator_additional_args, - )}" : "" - aws_authenticator_env_variables = length(var.kubeconfig_aws_authenticator_env_variables) > 0 ? " env:\n${join( - "\n", - data.template_file.aws_authenticator_env_variables.*.rendered, - )}" : "" - } -} - -data "template_file" "aws_authenticator_env_variables" { - template = < kube_config.yaml - echo "${null_resource.update_aws_auth_and_install_operator[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml - kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && break || sleep 10 -done -kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/crd.yaml -kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/tiller-rbac.yaml -kubectl apply -f manifests/local-volume-provisioner.yaml -kubectl apply -f manifests/gp2-storageclass.yaml -helm init --service-account tiller --upgrade --wait -until helm ls; do - echo "Wait tiller ready" -done -helm repo add pingcap http://charts.pingcap.org/ -helm upgrade --install tidb-operator pingcap/tidb-operator --version=${var.operator_version} --namespace=tidb-admin --wait -helm version -rm aws_auth_configmap.yaml kube_config.yaml -EOT - - interpreter = var.local_exec_interpreter - environment = { - KUBECONFIG = "kube_config.yaml" - } - } - - triggers = { - kube_config_map_rendered = data.template_file.kubeconfig.rendered - config_map_rendered = data.template_file.config_map_aws_auth.rendered - endpoint = aws_eks_cluster.this.endpoint - } - - count = var.manage_aws_auth ? 
1 : 0 -} - -data "aws_caller_identity" "current" { -} - -data "template_file" "launch_template_worker_role_arns" { - count = var.worker_group_launch_template_count - template = file("${path.module}/templates/worker-role.tpl") - - vars = { - worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element( - aws_iam_instance_profile.workers_launch_template.*.role, - count.index, - )}" - } -} - -data "template_file" "worker_role_arns" { - count = var.worker_group_count - template = file("${path.module}/templates/worker-role.tpl") - - vars = { - worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(aws_iam_instance_profile.workers.*.role, count.index)}" - } -} - -data "template_file" "config_map_aws_auth" { - template = file("${path.module}/templates/config-map-aws-auth.yaml.tpl") - - vars = { - worker_role_arn = join( - "", - distinct( - concat( - data.template_file.launch_template_worker_role_arns.*.rendered, - data.template_file.worker_role_arns.*.rendered, - ), - ), - ) - map_users = join("", data.template_file.map_users.*.rendered) - map_roles = join("", data.template_file.map_roles.*.rendered) - map_accounts = join("", data.template_file.map_accounts.*.rendered) - } -} - -data "template_file" "map_users" { - count = var.map_users_count - template = file( - "${path.module}/templates/config-map-aws-auth-map_users.yaml.tpl", - ) - - vars = { - user_arn = var.map_users[count.index]["user_arn"] - username = var.map_users[count.index]["username"] - group = var.map_users[count.index]["group"] - } -} - -data "template_file" "map_roles" { - count = var.map_roles_count - template = file( - "${path.module}/templates/config-map-aws-auth-map_roles.yaml.tpl", - ) - - vars = { - role_arn = var.map_roles[count.index]["role_arn"] - username = var.map_roles[count.index]["username"] - group = var.map_roles[count.index]["group"] - } -} - -data "template_file" "map_accounts" { - count = var.map_accounts_count - template = file( - "${path.module}/templates/config-map-aws-auth-map_accounts.yaml.tpl", - ) - - vars = { - account_number = element(var.map_accounts, count.index) - } -} diff --git a/deploy/aws/eks/kubectl.tf b/deploy/aws/eks/kubectl.tf deleted file mode 100644 index cf0d0a395d..0000000000 --- a/deploy/aws/eks/kubectl.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "local_file" "kubeconfig" { - content = data.template_file.kubeconfig.rendered - filename = "${var.config_output_path}kubeconfig_${var.cluster_name}" - count = var.write_kubeconfig ? 1 : 0 -} - diff --git a/deploy/aws/eks/local.tf b/deploy/aws/eks/local.tf deleted file mode 100644 index 26d8595d4b..0000000000 --- a/deploy/aws/eks/local.tf +++ /dev/null @@ -1,246 +0,0 @@ -locals { - asg_tags = null_resource.tags_as_list_of_maps.*.triggers - - cluster_security_group_id = var.cluster_security_group_id == "" ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id - - worker_security_group_id = var.worker_security_group_id == "" ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id - - default_iam_role_id = element(concat(aws_iam_role.workers.*.id, [""]), 0) - kubeconfig_name = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name - - - workers_group_defaults_defaults = { - name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used. - ami_id = data.aws_ami.eks_worker.id # AMI ID for the eks workers. 
If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. - asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group. - asg_max_size = "3" # Maximum worker capacity in the autoscaling group. - asg_min_size = "1" # Minimum worker capacity in the autoscaling group. - asg_force_delete = false # Enable forced deletion for the autoscaling group. - instance_type = "m4.large" # Size of the workers instances. - spot_price = "" # Cost of spot instance. - placement_tenancy = "" # The tenancy of the instance. Valid values are "default" or "dedicated". - root_volume_size = "100" # root volume size of workers instances. - root_volume_type = "gp2" # root volume type of workers instances, can be 'standard', 'gp2', or 'io1' - root_iops = "0" # The amount of provisioned IOPS. This must be set with a volume_type of "io1". - key_name = "" # The key name that should be used for the instances in the autoscaling group - pre_userdata = "" # userdata to pre-append to the default userdata. - bootstrap_extra_args = "" # Extra arguments passed to the bootstrap.sh script from the EKS AMI. - additional_userdata = "" # userdata to append to the default userdata. - ebs_optimized = true # sets whether to use ebs optimization on supported types. - enable_monitoring = true # Enables/disables detailed monitoring. - public_ip = false # Associate a public ip address with a worker - kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints. - subnets = join(",", var.subnets) # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789 - autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling. - additional_security_group_ids = "" # A comma delimited list of additional security group ids to include in worker launch config - protect_from_scale_in = false # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible. - iam_role_id = local.default_iam_role_id # Use the specified IAM role if set. - suspended_processes = "" # A comma delimited string of processes to to suspend. i.e. AZRebalance,HealthCheck,ReplaceUnhealthy - target_group_arns = "" # A comma delimited list of ALB target group ARNs to be associated to the ASG - enabled_metrics = "" # A comma delimited list of metrics to be collected i.e. GroupMinSize,GroupMaxSize,GroupDesiredCapacity - placement_group = "" # The name of the placement group into which to launch the instances, if any. - } - - control_worker_groups = [ - { - name = "${var.cluster_name}-control" - key_name = var.ssh_key_name - instance_type = "t2.xlarge" - public_ip = false - asg_desired_capacity = 1 - asg_max_size = 3 - }, - ] - - workers_group_defaults = merge( - local.workers_group_defaults_defaults, - var.workers_group_defaults, - ) - - workers_group_launch_template_defaults_defaults = { - name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used. - ami_id = data.aws_ami.eks_worker.id # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. - root_block_device_id = data.aws_ami.eks_worker.root_device_name # Root device name for workers. If non is provided, will assume default AMI was used. - asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group. 
- asg_max_size = "3" # Maximum worker capacity in the autoscaling group. - asg_min_size = "1" # Minimum worker capacity in the autoscaling group. - asg_force_delete = false # Enable forced deletion for the autoscaling group. - instance_type = "m4.large" # Size of the workers instances. - override_instance_type = "t3.large" # Need to specify at least one additional instance type for mixed instances policy. The instance_type holds higher priority for on demand instances. - on_demand_allocation_strategy = "prioritized" # Strategy to use when launching on-demand instances. Valid values: prioritized. - on_demand_base_capacity = "0" # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances - on_demand_percentage_above_base_capacity = "100" # Percentage split between on-demand and Spot instances above the base on-demand capacity - spot_allocation_strategy = "lowest-price" # The only valid value is lowest-price, which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify. - spot_instance_pools = 10 # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify." - spot_max_price = "" # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price - spot_price = "" # Cost of spot instance. - placement_tenancy = "default" # The tenancy of the instance. Valid values are "default" or "dedicated". - root_volume_size = "100" # root volume size of workers instances. - root_volume_type = "gp2" # root volume type of workers instances, can be 'standard', 'gp2', or 'io1' - root_iops = "0" # The amount of provisioned IOPS. This must be set with a volume_type of "io1". - root_encrypted = false # root volume encryption for workers. - kms_key_id = "" # KMS key ID used for encrypted block device. ASG must have access to this key. If not specified, the default KMS key will be used. - key_name = "" # The key name that should be used for the instances in the autoscaling group - pre_userdata = "" # userdata to pre-append to the default userdata. - bootstrap_extra_args = "" # Extra arguments passed to the bootstrap.sh script from the EKS AMI. - additional_userdata = "" # userdata to append to the default userdata. - ebs_optimized = true # sets whether to use ebs optimization on supported types. - enable_monitoring = true # Enables/disables detailed monitoring. - public_ip = false # Associate a public ip address with a worker - kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints. - subnets = join(",", var.subnets) # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789 - autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling. - additional_security_group_ids = "" # A comma delimited list of additional security group ids to include in worker launch config - protect_from_scale_in = false # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible. - iam_role_id = local.default_iam_role_id # Use the specified IAM role if set. - suspended_processes = "" # A comma delimited string of processes to to suspend. i.e. 
AZRebalance,HealthCheck,ReplaceUnhealthy - target_group_arns = "" # A comma delimited list of ALB target group ARNs to be associated to the ASG - enabled_metrics = "" # A comma delimited list of metrics to be collected i.e. GroupMinSize,GroupMaxSize,GroupDesiredCapacity - } - - workers_group_launch_template_defaults = merge( - local.workers_group_launch_template_defaults_defaults, - var.workers_group_launch_template_defaults, - ) - - eks_info = { - name = aws_eks_cluster.this.name - version = var.cluster_version - endpoint = aws_eks_cluster.this.endpoint - ca = aws_eks_cluster.this.certificate_authority[0].data - kubeconfig = data.template_file.kubeconfig.rendered - kubeconfig_file = "${var.config_output_path}kubeconfig_${var.cluster_name}" - worker_iam_role = aws_iam_role.workers - worker_security_group_id = local.worker_security_group_id - worker_iam_instance_profile = aws_iam_instance_profile.workers - vpc_id = var.vpc_id - } - - # Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml - ebs_optimized = { - "c1.medium" = false - "c1.xlarge" = true - "c3.large" = false - "c3.xlarge" = true - "c3.2xlarge" = true - "c3.4xlarge" = true - "c3.8xlarge" = false - "c4.large" = true - "c4.xlarge" = true - "c4.2xlarge" = true - "c4.4xlarge" = true - "c4.8xlarge" = true - "c5.large" = true - "c5.xlarge" = true - "c5.2xlarge" = true - "c5.4xlarge" = true - "c5.9xlarge" = true - "c5.18xlarge" = true - "c5d.large" = true - "c5d.xlarge" = true - "c5d.2xlarge" = true - "c5d.4xlarge" = true - "c5d.9xlarge" = true - "c5d.18xlarge" = true - "cc2.8xlarge" = false - "cr1.8xlarge" = false - "d2.xlarge" = true - "d2.2xlarge" = true - "d2.4xlarge" = true - "d2.8xlarge" = true - "f1.2xlarge" = true - "f1.4xlarge" = true - "f1.16xlarge" = true - "g2.2xlarge" = true - "g2.8xlarge" = false - "g3s.xlarge" = true - "g3.4xlarge" = true - "g3.8xlarge" = true - "g3.16xlarge" = true - "h1.2xlarge" = true - "h1.4xlarge" = true - "h1.8xlarge" = true - "h1.16xlarge" = true - "hs1.8xlarge" = false - "i2.xlarge" = true - "i2.2xlarge" = true - "i2.4xlarge" = true - "i2.8xlarge" = false - "i3.large" = true - "i3.xlarge" = true - "i3.2xlarge" = true - "i3.4xlarge" = true - "i3.8xlarge" = true - "i3.16xlarge" = true - "i3.metal" = true - "m1.small" = false - "m1.medium" = false - "m1.large" = true - "m1.xlarge" = true - "m2.xlarge" = false - "m2.2xlarge" = true - "m2.4xlarge" = true - "m3.medium" = false - "m3.large" = false - "m3.xlarge" = true - "m3.2xlarge" = true - "m4.large" = true - "m4.xlarge" = true - "m4.2xlarge" = true - "m4.4xlarge" = true - "m4.10xlarge" = true - "m4.16xlarge" = true - "m5.large" = true - "m5.xlarge" = true - "m5.2xlarge" = true - "m5.4xlarge" = true - "m5.9xlarge" = true - "m5.18xlarge" = true - "m5d.large" = true - "m5d.xlarge" = true - "m5d.2xlarge" = true - "m5d.4xlarge" = true - "m5d.12xlarge" = true - "m5d.24xlarge" = true - "p2.xlarge" = true - "p2.8xlarge" = true - "p2.16xlarge" = true - "p3.2xlarge" = true - "p3.8xlarge" = true - "p3.16xlarge" = true - "r3.large" = false - "r3.xlarge" = true - "r3.2xlarge" = true - "r3.4xlarge" = true - "r3.8xlarge" = false - "r4.large" = true - "r4.xlarge" = true - "r4.2xlarge" = true - "r4.4xlarge" = true - "r4.8xlarge" = true - "r4.16xlarge" = true - "t1.micro" = false - "t2.nano" = false - "t2.micro" = false - "t2.small" = false - "t2.medium" = false - "t2.large" = false - "t2.xlarge" = false - "t2.2xlarge" = false - "t3.nano" = true - "t3.micro" = true - "t3.small" = true - "t3.medium" = true - "t3.large" = 
true - "t3.xlarge" = true - "t3.2xlarge" = true - "x1.16xlarge" = true - "x1.32xlarge" = true - "x1e.xlarge" = true - "x1e.2xlarge" = true - "x1e.4xlarge" = true - "x1e.8xlarge" = true - "x1e.16xlarge" = true - "x1e.32xlarge" = true - } -} diff --git a/deploy/aws/eks/outputs.tf b/deploy/aws/eks/outputs.tf deleted file mode 100644 index 6cf043f4b4..0000000000 --- a/deploy/aws/eks/outputs.tf +++ /dev/null @@ -1,103 +0,0 @@ -output "cluster_id" { - description = "The name/id of the EKS cluster." - value = aws_eks_cluster.this.id -} - -# Though documented, not yet supported -# output "cluster_arn" { -# description = "The Amazon Resource Name (ARN) of the cluster." -# value = "${aws_eks_cluster.this.arn}" -# } - -output "cluster_certificate_authority_data" { - description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster." - value = aws_eks_cluster.this.certificate_authority[0].data -} - -output "cluster_endpoint" { - description = "The endpoint for your EKS Kubernetes API." - value = aws_eks_cluster.this.endpoint -} - -output "cluster_version" { - description = "The Kubernetes server version for the EKS cluster." - value = aws_eks_cluster.this.version -} - -output "cluster_security_group_id" { - description = "Security group ID attached to the EKS cluster." - value = local.cluster_security_group_id -} - -output "config_map_aws_auth" { - description = "A kubernetes configuration to authenticate to this EKS cluster." - value = data.template_file.config_map_aws_auth.rendered -} - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster." - value = aws_iam_role.cluster.name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster." - value = aws_iam_role.cluster.arn -} - -output "kubeconfig" { - description = "kubectl config file contents for this EKS cluster." - value = data.template_file.kubeconfig.rendered -} - -output "kubeconfig_filename" { - description = "The filename of the generated kubectl config." - value = element(concat(local_file.kubeconfig.*.filename, [""]), 0) -} - -output "kubeconfig_file" { - description = "The filename of the generated kubectl config." - value = element(concat(local_file.kubeconfig.*.filename, [""]), 0) -} - -output "workers_asg_arns" { - description = "IDs of the autoscaling groups containing workers." - value = concat( - aws_autoscaling_group.workers.*.arn, - aws_autoscaling_group.workers_launch_template.*.arn, - ) -} - -output "workers_asg_names" { - description = "Names of the autoscaling groups containing workers." - value = concat( - aws_autoscaling_group.workers.*.id, - aws_autoscaling_group.workers_launch_template.*.id, - ) -} - -output "worker_security_group_id" { - description = "Security group ID attached to the EKS workers." 
- value = local.worker_security_group_id -} - -output "worker_iam_role_name" { - description = "default IAM role name for EKS worker groups" - value = aws_iam_role.workers.name -} - -output "worker_iam_role_arn" { - description = "default IAM role ARN for EKS worker groups" - value = aws_iam_role.workers.arn -} - -output "worker_iam_role" { - value = aws_iam_role.workers -} - -output "worker_iam_instance_profile" { -value = aws_iam_instance_profile.workers -} - -output "eks_info" { - value = local.eks_info -} diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl deleted file mode 100644 index 26dc5078f4..0000000000 --- a/deploy/aws/eks/templates/config-map-aws-auth-map_accounts.yaml.tpl +++ /dev/null @@ -1 +0,0 @@ - - "${account_number}" diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl deleted file mode 100644 index 9f321b7be6..0000000000 --- a/deploy/aws/eks/templates/config-map-aws-auth-map_roles.yaml.tpl +++ /dev/null @@ -1,4 +0,0 @@ - - rolearn: ${role_arn} - username: ${username} - groups: - - ${group} diff --git a/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl deleted file mode 100644 index 92499de41c..0000000000 --- a/deploy/aws/eks/templates/config-map-aws-auth-map_users.yaml.tpl +++ /dev/null @@ -1,4 +0,0 @@ - - userarn: ${user_arn} - username: ${username} - groups: - - ${group} diff --git a/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl b/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl deleted file mode 100644 index 86f4f5f998..0000000000 --- a/deploy/aws/eks/templates/config-map-aws-auth.yaml.tpl +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: aws-auth - namespace: kube-system -data: - mapRoles: | -${worker_role_arn} -${map_roles} - mapUsers: | -${map_users} - mapAccounts: | -${map_accounts} diff --git a/deploy/aws/eks/templates/kubeconfig.tpl b/deploy/aws/eks/templates/kubeconfig.tpl deleted file mode 100644 index 1696391e89..0000000000 --- a/deploy/aws/eks/templates/kubeconfig.tpl +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -preferences: {} -kind: Config - -clusters: -- cluster: - server: ${endpoint} - certificate-authority-data: ${cluster_auth_base64} - name: ${kubeconfig_name} - -contexts: -- context: - cluster: ${kubeconfig_name} - user: ${kubeconfig_name} - name: ${kubeconfig_name} - -current-context: ${kubeconfig_name} - -users: -- name: ${kubeconfig_name} - user: - exec: - apiVersion: client.authentication.k8s.io/v1alpha1 - command: ${aws_authenticator_command} - args: -${aws_authenticator_command_args} -${aws_authenticator_additional_args} -${aws_authenticator_env_variables} diff --git a/deploy/aws/eks/templates/userdata.sh.tpl b/deploy/aws/eks/templates/userdata.sh.tpl deleted file mode 100644 index ba8ea2800b..0000000000 --- a/deploy/aws/eks/templates/userdata.sh.tpl +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -xe - -# Allow user supplied pre userdata code -${pre_userdata} - -# Bootstrap and join the cluster -/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}' - -# Allow user supplied userdata code -${additional_userdata} diff --git a/deploy/aws/eks/templates/worker-role.tpl b/deploy/aws/eks/templates/worker-role.tpl 
deleted file mode 100644 index 2a9af5863f..0000000000 --- a/deploy/aws/eks/templates/worker-role.tpl +++ /dev/null @@ -1,5 +0,0 @@ - - rolearn: ${worker_role_arn} - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes diff --git a/deploy/aws/eks/variables.tf b/deploy/aws/eks/variables.tf deleted file mode 100644 index 9a54967a01..0000000000 --- a/deploy/aws/eks/variables.tf +++ /dev/null @@ -1,262 +0,0 @@ -variable "cluster_name" { - description = "Name of the EKS cluster. Also used as a prefix in names of related resources." -} - -variable "cluster_security_group_id" { - description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the workers and provide API access to your current IP/32." - default = "" -} - -variable "cluster_version" { - description = "Kubernetes version to use for the EKS cluster." - default = "1.12" -} - -variable "operator_version" { - description = "tidb operator version" - default = "v1.0.0-beta.3" -} - -variable "config_output_path" { - description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Should end in a forward slash `/` ." - default = "./" -} - -variable "write_kubeconfig" { - description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `config_output_path`." - default = true -} - -variable "manage_aws_auth" { - description = "Whether to apply the aws-auth configmap file." - default = true -} - -variable "write_aws_auth_config" { - description = "Whether to write the aws-auth configmap file." - default = true -} - -variable "map_accounts" { - description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." - type = list(string) - default = [] -} - -variable "map_accounts_count" { - description = "The count of accounts in the map_accounts list." - type = string - default = 0 -} - -variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." - type = list(map(string)) - default = [] -} - -variable "map_roles_count" { - description = "The count of roles in the map_roles list." - type = string - default = 0 -} - -variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format." - type = list(map(string)) - default = [] -} - -variable "map_users_count" { - description = "The count of roles in the map_users list." - type = string - default = 0 -} - -variable "subnets" { - description = "A list of subnets to place the EKS cluster and workers within." - type = list(string) -} - -variable "tags" { - description = "A map of tags to add to all resources." - type = map(string) - default = {} -} - -variable "vpc_id" { - description = "VPC where the cluster and workers will be deployed." -} - -variable "worker_groups" { - description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys." - type = list(map(string)) - - default = [ - { - name = "default" - }, - ] -} - -variable "worker_group_count" { - description = "The number of maps contained within the worker_groups list." 
- type = string - default = "1" -} - -variable "workers_group_defaults" { - description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." - type = map(string) - default = {} -} - -variable "worker_group_tags" { - description = "A map defining extra tags to be applied to the worker group ASG." - type = map(list(string)) - - default = { - default = [] - } -} - -variable "worker_groups_launch_template" { - description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys." - type = list(map(string)) - - default = [ - { - name = "default" - }, - ] -} - -variable "worker_group_launch_template_count" { - description = "The number of maps contained within the worker_groups_launch_template list." - type = string - default = "0" -} - -variable "workers_group_launch_template_defaults" { - description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." - type = map(string) - default = {} -} - -variable "worker_group_launch_template_tags" { - description = "A map defining extra tags to be applied to the worker group template ASG." - type = map(list(string)) - - default = { - default = [] - } -} - -variable "worker_security_group_id" { - description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster." - default = "" -} - -variable "worker_ami_name_filter" { - description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. \"v20190220\"" - default = "v*" -} - -variable "worker_additional_security_group_ids" { - description = "A list of additional security group ids to attach to worker instances" - type = list(string) - default = [] -} - -variable "worker_sg_ingress_from_port" { - description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)." - default = "1025" -} - -variable "workers_additional_policies" { - description = "Additional policies to be added to workers" - type = list(string) - default = [] -} - -variable "workers_additional_policies_count" { - default = 0 -} - -variable "kubeconfig_aws_authenticator_command" { - description = "Command to use to fetch AWS EKS credentials." - default = "aws-iam-authenticator" -} - -variable "kubeconfig_aws_authenticator_command_args" { - description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]." - type = list(string) - default = [] -} - -variable "kubeconfig_aws_authenticator_additional_args" { - description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]." - type = list(string) - default = [] -} - -variable "kubeconfig_aws_authenticator_env_variables" { - description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}." - type = map(string) - default = {} -} - -variable "kubeconfig_name" { - description = "Override the default name used for items kubeconfig." - default = "" -} - -variable "cluster_create_timeout" { - description = "Timeout value when creating the EKS cluster." 
- default = "15m" -} - -variable "cluster_delete_timeout" { - description = "Timeout value when deleting the EKS cluster." - default = "15m" -} - -variable "local_exec_interpreter" { - description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows Git Bash is a good choice." - type = list(string) - default = ["/bin/sh", "-c"] -} - -variable "cluster_create_security_group" { - description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`." - default = true -} - -variable "worker_create_security_group" { - description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`." - default = true -} - -variable "permissions_boundary" { - description = "If provided, all IAM roles will be created with this permissions boundary attached." - default = "" -} - -variable "iam_path" { - description = "If provided, all IAM roles will be created on this path." - default = "/" -} - -variable "cluster_endpoint_private_access" { - description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled." - default = false -} - -variable "cluster_endpoint_public_access" { - description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled." - default = true -} - -variable "ssh_key_name" { - type = string -} diff --git a/deploy/aws/eks/workers.tf b/deploy/aws/eks/workers.tf deleted file mode 100644 index 47d60f7a75..0000000000 --- a/deploy/aws/eks/workers.tf +++ /dev/null @@ -1,354 +0,0 @@ -# Worker Groups using Launch Configurations - -resource "aws_autoscaling_group" "workers" { - name_prefix = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}" - desired_capacity = lookup( - local.control_worker_groups[count.index], - "asg_desired_capacity", - local.workers_group_defaults["asg_desired_capacity"], - ) - max_size = lookup( - local.control_worker_groups[count.index], - "asg_max_size", - local.workers_group_defaults["asg_max_size"], - ) - min_size = lookup( - local.control_worker_groups[count.index], - "asg_min_size", - local.workers_group_defaults["asg_min_size"], - ) - force_delete = lookup( - local.control_worker_groups[count.index], - "asg_force_delete", - local.workers_group_defaults["asg_force_delete"], - ) - launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) - vpc_zone_identifier = split( - ",", - coalesce( - lookup(local.control_worker_groups[count.index], "subnets", ""), - local.workers_group_defaults["subnets"], - ), - ) - protect_from_scale_in = lookup( - local.control_worker_groups[count.index], - "protect_from_scale_in", - local.workers_group_defaults["protect_from_scale_in"], - ) - count = var.worker_group_count - placement_group = lookup( - local.control_worker_groups[count.index], - "placement_group", - local.workers_group_defaults["placement_group"], - ) - - tags = concat( - [ - { - key = "Name" - value = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}-eks_asg" - propagate_at_launch = true - }, - { - key = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" - value = "owned" - propagate_at_launch = true - }, - { - key = "k8s.io/cluster-autoscaler/${lookup( - local.control_worker_groups[count.index], - "autoscaling_enabled", - local.workers_group_defaults["autoscaling_enabled"], - ) == 1 ? 
"enabled" : "disabled"}" - value = "true" - propagate_at_launch = false - }, - # { - # "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" - # "value" = "" - # "propagate_at_launch" = false - # }, - { - key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - value = "${lookup( - local.control_worker_groups[count.index], - "root_volume_size", - local.workers_group_defaults["root_volume_size"], - )}Gi" - propagate_at_launch = false - }, - ], - local.asg_tags, - var.worker_group_tags[contains( - keys(var.worker_group_tags), - lookup(local.control_worker_groups[count.index], "name", count.index), - ) ? lookup(local.control_worker_groups[count.index], "name", count.index) : "default"], - ) - - - lifecycle { - create_before_destroy = true - # ignore_changes = ["desired_capacity"] - } -} - -resource "aws_launch_configuration" "workers" { - name_prefix = "${aws_eks_cluster.this.name}-${lookup(local.control_worker_groups[count.index], "name", count.index)}" - associate_public_ip_address = lookup( - local.control_worker_groups[count.index], - "public_ip", - local.workers_group_defaults["public_ip"], - ) - security_groups = concat([local.worker_security_group_id], var.worker_additional_security_group_ids, compact( - split( - ",", - lookup( - local.control_worker_groups[count.index], - "additional_security_group_ids", - local.workers_group_defaults["additional_security_group_ids"], - ), - ), - )) - iam_instance_profile = element(aws_iam_instance_profile.workers.*.id, count.index) - image_id = lookup( - local.control_worker_groups[count.index], - "ami_id", - local.workers_group_defaults["ami_id"], - ) - instance_type = lookup( - local.control_worker_groups[count.index], - "instance_type", - local.workers_group_defaults["instance_type"], - ) - key_name = lookup( - local.control_worker_groups[count.index], - "key_name", - local.workers_group_defaults["key_name"], - ) - user_data_base64 = base64encode(element(data.template_file.userdata.*.rendered, count.index)) - ebs_optimized = lookup( - local.control_worker_groups[count.index], - "ebs_optimized", - lookup( - local.ebs_optimized, - lookup( - local.control_worker_groups[count.index], - "instance_type", - local.workers_group_defaults["instance_type"], - ), - false, - ), - ) - enable_monitoring = lookup( - local.control_worker_groups[count.index], - "enable_monitoring", - local.workers_group_defaults["enable_monitoring"], - ) - spot_price = lookup( - local.control_worker_groups[count.index], - "spot_price", - local.workers_group_defaults["spot_price"], - ) - placement_tenancy = lookup( - local.control_worker_groups[count.index], - "placement_tenancy", - local.workers_group_defaults["placement_tenancy"], - ) - count = var.worker_group_count - - lifecycle { - create_before_destroy = true - } - - root_block_device { - volume_size = lookup( - local.control_worker_groups[count.index], - "root_volume_size", - local.workers_group_defaults["root_volume_size"], - ) - volume_type = lookup( - local.control_worker_groups[count.index], - "root_volume_type", - local.workers_group_defaults["root_volume_type"], - ) - iops = lookup( - local.control_worker_groups[count.index], - "root_iops", - local.workers_group_defaults["root_iops"], - ) - delete_on_termination = true - } -} - -resource "aws_security_group" "workers" { - name_prefix = aws_eks_cluster.this.name - description = "Security group for all nodes in the cluster." - vpc_id = var.vpc_id - count = var.worker_create_security_group ? 
1 : 0 - tags = merge( - var.tags, - { - Name = "${aws_eks_cluster.this.name}-eks_worker_sg" - "kubernetes.io/cluster/${aws_eks_cluster.this.name}" = "owned" - }, - ) -} - -resource "aws_security_group_rule" "workers_egress_internet" { - description = "Allow nodes all egress to the Internet." - protocol = "-1" - security_group_id = aws_security_group.workers[0].id - cidr_blocks = ["0.0.0.0/0"] - from_port = 0 - to_port = 0 - type = "egress" - count = var.worker_create_security_group ? 1 : 0 -} - -resource "aws_security_group_rule" "workers_ingress_self" { - description = "Allow node to communicate with each other." - protocol = "-1" - security_group_id = aws_security_group.workers[0].id - source_security_group_id = aws_security_group.workers[0].id - from_port = 0 - to_port = 65535 - type = "ingress" - count = var.worker_create_security_group ? 1 : 0 -} - -resource "aws_security_group_rule" "workers_ingress_cluster" { - description = "Allow workers pods to receive communication from the cluster control plane." - protocol = "tcp" - security_group_id = aws_security_group.workers[0].id - source_security_group_id = local.cluster_security_group_id - from_port = var.worker_sg_ingress_from_port - to_port = 65535 - type = "ingress" - count = var.worker_create_security_group ? 1 : 0 -} - -resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { - description = "Allow workers Kubelets to receive communication from the cluster control plane." - protocol = "tcp" - security_group_id = aws_security_group.workers[0].id - source_security_group_id = local.cluster_security_group_id - from_port = 10250 - to_port = 10250 - type = "ingress" - count = var.worker_create_security_group ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 -} - -resource "aws_security_group_rule" "workers_ingress_cluster_https" { - description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane." - protocol = "tcp" - security_group_id = aws_security_group.workers[0].id - source_security_group_id = local.cluster_security_group_id - from_port = 443 - to_port = 443 - type = "ingress" - count = var.worker_create_security_group ? 
1 : 0 -} - -resource "aws_iam_role" "workers" { - name_prefix = aws_eks_cluster.this.name - assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json - permissions_boundary = var.permissions_boundary - path = var.iam_path - force_detach_policies = true -} - -resource "aws_iam_instance_profile" "workers" { - name_prefix = aws_eks_cluster.this.name - role = lookup( - local.control_worker_groups[count.index], - "iam_role_id", - local.workers_group_defaults["iam_role_id"], - ) - count = var.worker_group_count - path = var.iam_path -} - -resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" - role = aws_iam_role.workers.name -} - -resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" - role = aws_iam_role.workers.name -} - -resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" { - policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - role = aws_iam_role.workers.name -} - -resource "aws_iam_role_policy_attachment" "workers_additional_policies" { - count = var.workers_additional_policies_count - role = aws_iam_role.workers.name - policy_arn = var.workers_additional_policies[count.index] -} - -resource "null_resource" "tags_as_list_of_maps" { - count = length(keys(var.tags)) - - triggers = { - key = element(keys(var.tags), count.index) - value = element(values(var.tags), count.index) - propagate_at_launch = "true" - } -} - -resource "aws_iam_role_policy_attachment" "workers_autoscaling" { - policy_arn = aws_iam_policy.worker_autoscaling.arn - role = aws_iam_role.workers.name -} - -resource "aws_iam_policy" "worker_autoscaling" { - name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}" - description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}" - policy = data.aws_iam_policy_document.worker_autoscaling.json - path = var.iam_path -} - -data "aws_iam_policy_document" "worker_autoscaling" { - statement { - sid = "eksWorkerAutoscalingAll" - effect = "Allow" - - actions = [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeLaunchTemplateVersions", - ] - - resources = ["*"] - } - - statement { - sid = "eksWorkerAutoscalingOwn" - effect = "Allow" - - actions = [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup", - ] - - resources = ["*"] - - condition { - test = "StringEquals" - variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}" - values = ["owned"] - } - - condition { - test = "StringEquals" - variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled" - values = ["true"] - } - } -} diff --git a/deploy/aws/eks/workers_launch_template.tf b/deploy/aws/eks/workers_launch_template.tf deleted file mode 100644 index 8a84575c6f..0000000000 --- a/deploy/aws/eks/workers_launch_template.tf +++ /dev/null @@ -1,298 +0,0 @@ -# Worker Groups using Launch Templates - -resource "aws_autoscaling_group" "workers_launch_template" { - name_prefix = "${aws_eks_cluster.this.name}-${lookup( - var.worker_groups_launch_template[count.index], - "name", - count.index, - )}" - desired_capacity = lookup( - var.worker_groups_launch_template[count.index], - "asg_desired_capacity", - 
local.workers_group_launch_template_defaults["asg_desired_capacity"], - ) - max_size = lookup( - var.worker_groups_launch_template[count.index], - "asg_max_size", - local.workers_group_launch_template_defaults["asg_max_size"], - ) - min_size = lookup( - var.worker_groups_launch_template[count.index], - "asg_min_size", - local.workers_group_launch_template_defaults["asg_min_size"], - ) - force_delete = lookup( - var.worker_groups_launch_template[count.index], - "asg_force_delete", - local.workers_group_launch_template_defaults["asg_force_delete"], - ) - - mixed_instances_policy { - instances_distribution { - on_demand_allocation_strategy = lookup( - var.worker_groups_launch_template[count.index], - "on_demand_allocation_strategy", - local.workers_group_launch_template_defaults["on_demand_allocation_strategy"], - ) - on_demand_base_capacity = lookup( - var.worker_groups_launch_template[count.index], - "on_demand_base_capacity", - local.workers_group_launch_template_defaults["on_demand_base_capacity"], - ) - on_demand_percentage_above_base_capacity = lookup( - var.worker_groups_launch_template[count.index], - "on_demand_percentage_above_base_capacity", - local.workers_group_launch_template_defaults["on_demand_percentage_above_base_capacity"], - ) - spot_allocation_strategy = lookup( - var.worker_groups_launch_template[count.index], - "spot_allocation_strategy", - local.workers_group_launch_template_defaults["spot_allocation_strategy"], - ) - spot_instance_pools = lookup( - var.worker_groups_launch_template[count.index], - "spot_instance_pools", - local.workers_group_launch_template_defaults["spot_instance_pools"], - ) - spot_max_price = lookup( - var.worker_groups_launch_template[count.index], - "spot_max_price", - local.workers_group_launch_template_defaults["spot_max_price"], - ) - } - - launch_template { - launch_template_specification { - launch_template_id = element( - aws_launch_template.workers_launch_template.*.id, - count.index, - ) - version = "$Latest" - } - - override { - instance_type = lookup( - var.worker_groups_launch_template[count.index], - "instance_type", - local.workers_group_launch_template_defaults["instance_type"], - ) - } - - override { - instance_type = lookup( - var.worker_groups_launch_template[count.index], - "override_instance_type", - local.workers_group_launch_template_defaults["override_instance_type"], - ) - } - } - } - - vpc_zone_identifier = split( - ",", - coalesce( - lookup( - var.worker_groups_launch_template[count.index], - "subnets", - "", - ), - local.workers_group_launch_template_defaults["subnets"], - ), - ) - protect_from_scale_in = lookup( - var.worker_groups_launch_template[count.index], - "protect_from_scale_in", - local.workers_group_launch_template_defaults["protect_from_scale_in"], - ) - count = var.worker_group_launch_template_count - - tags = concat( - [ - { - key = "Name" - value = "${aws_eks_cluster.this.name}-${lookup( - var.worker_groups_launch_template[count.index], - "name", - count.index, - )}-eks_asg" - propagate_at_launch = true - }, - { - key = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" - value = "owned" - propagate_at_launch = true - }, - { - key = "k8s.io/cluster-autoscaler/${lookup( - var.worker_groups_launch_template[count.index], - "autoscaling_enabled", - local.workers_group_launch_template_defaults["autoscaling_enabled"], - ) == 1 ? 
"enabled" : "disabled"}" - value = "true" - propagate_at_launch = false - }, - { - key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" - value = "${lookup( - var.worker_groups_launch_template[count.index], - "root_volume_size", - local.workers_group_launch_template_defaults["root_volume_size"], - )}Gi" - propagate_at_launch = false - }, - ], - local.asg_tags, - var.worker_group_launch_template_tags[contains( - keys(var.worker_group_launch_template_tags), - lookup( - var.worker_groups_launch_template[count.index], - "name", - count.index, - ), - ) ? lookup( - var.worker_groups_launch_template[count.index], - "name", - count.index, - ) : "default"], - ) - - lifecycle { - create_before_destroy = true - - ignore_changes = [desired_capacity] - } -} - -resource "aws_launch_template" "workers_launch_template" { - name_prefix = "${aws_eks_cluster.this.name}-${lookup( - var.worker_groups_launch_template[count.index], - "name", - count.index, - )}" - - network_interfaces { - associate_public_ip_address = lookup( - var.worker_groups_launch_template[count.index], - "public_ip", - local.workers_group_launch_template_defaults["public_ip"], - ) - security_groups = concat([local.worker_security_group_id], var.worker_additional_security_group_ids, compact( - split( - ",", - lookup( - var.worker_groups_launch_template[count.index], - "additional_security_group_ids", - local.workers_group_launch_template_defaults["additional_security_group_ids"], - ), - ), - )) - } - - iam_instance_profile { - name = element( - aws_iam_instance_profile.workers_launch_template.*.name, - count.index, - ) - } - - image_id = lookup( - var.worker_groups_launch_template[count.index], - "ami_id", - local.workers_group_launch_template_defaults["ami_id"], - ) - instance_type = lookup( - var.worker_groups_launch_template[count.index], - "instance_type", - local.workers_group_launch_template_defaults["instance_type"], - ) - key_name = lookup( - var.worker_groups_launch_template[count.index], - "key_name", - local.workers_group_launch_template_defaults["key_name"], - ) - user_data = base64encode( - element( - data.template_file.launch_template_userdata.*.rendered, - count.index, - ), - ) - ebs_optimized = lookup( - var.worker_groups_launch_template[count.index], - "ebs_optimized", - lookup( - local.ebs_optimized, - lookup( - var.worker_groups_launch_template[count.index], - "instance_type", - local.workers_group_launch_template_defaults["instance_type"], - ), - false, - ), - ) - - monitoring { - enabled = lookup( - var.worker_groups_launch_template[count.index], - "enable_monitoring", - local.workers_group_launch_template_defaults["enable_monitoring"], - ) - } - - placement { - tenancy = lookup( - var.worker_groups_launch_template[count.index], - "placement_tenancy", - local.workers_group_launch_template_defaults["placement_tenancy"], - ) - } - - count = var.worker_group_launch_template_count - - lifecycle { - create_before_destroy = true - } - - block_device_mappings { - device_name = data.aws_ami.eks_worker.root_device_name - - ebs { - volume_size = lookup( - var.worker_groups_launch_template[count.index], - "root_volume_size", - local.workers_group_launch_template_defaults["root_volume_size"], - ) - volume_type = lookup( - var.worker_groups_launch_template[count.index], - "root_volume_type", - local.workers_group_launch_template_defaults["root_volume_type"], - ) - iops = lookup( - var.worker_groups_launch_template[count.index], - "root_iops", - local.workers_group_launch_template_defaults["root_iops"], - ) - 
encrypted = lookup( - var.worker_groups_launch_template[count.index], - "root_encrypted", - local.workers_group_launch_template_defaults["root_encrypted"], - ) - kms_key_id = lookup( - var.worker_groups_launch_template[count.index], - "kms_key_id", - local.workers_group_launch_template_defaults["kms_key_id"], - ) - delete_on_termination = true - } - } -} - -resource "aws_iam_instance_profile" "workers_launch_template" { - name_prefix = aws_eks_cluster.this.name - role = lookup( - var.worker_groups_launch_template[count.index], - "iam_role_id", - local.workers_group_launch_template_defaults["iam_role_id"], - ) - count = var.worker_group_launch_template_count - path = var.iam_path -} diff --git a/deploy/aws/local.tf b/deploy/aws/local.tf deleted file mode 100644 index cc0421aa5c..0000000000 --- a/deploy/aws/local.tf +++ /dev/null @@ -1,5 +0,0 @@ -locals { - default_subnets = split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets)) - default_eks = module.eks.eks_info - kubeconfig = "${path.module}/credentials/kubeconfig-${var.eks_name}" -} \ No newline at end of file diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index 62a4f7d77c..014633de51 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -2,6 +2,11 @@ provider "aws" { region = var.region } +locals { + default_subnets = split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets)) + default_eks = module.tidb-operator.eks +} + module "key-pair" { source = "./aws-key-pair" name = var.eks_name @@ -11,11 +16,10 @@ module "key-pair" { module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "2.6.0" - name = var.eks_name - cidr = var.vpc_cidr - create_vpc = var.create_vpc - # azs = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2]] + version = "2.6.0" + name = var.eks_name + cidr = var.vpc_cidr + create_vpc = var.create_vpc azs = data.aws_availability_zones.available.names private_subnets = var.private_subnets public_subnets = var.public_subnets @@ -34,17 +38,14 @@ module "vpc" { } } -module "eks" { - source = "./eks" - cluster_name = var.eks_name - cluster_version = var.eks_version +module "tidb-operator" { + source = "./tidb-operator" + + eks_name = var.eks_name + eks_version = var.eks_version operator_version = var.operator_version - ssh_key_name = module.key-pair.key_name config_output_path = "credentials/" subnets = local.default_subnets vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id - - tags = { - app = "tidb" - } + ssh_key_name = module.key-pair.key_name } diff --git a/deploy/aws/outputs.tf b/deploy/aws/outputs.tf index 73b80ba04f..182120d129 100644 --- a/deploy/aws/outputs.tf +++ b/deploy/aws/outputs.tf @@ -10,12 +10,12 @@ output "eks_version" { output "eks_endpoint" { description = "Endpoint for EKS control plane." - value = module.eks.cluster_endpoint + value = module.tidb-operator.eks.cluster_endpoint } output "kubeconfig_filename" { description = "The filename of the generated kubectl config." 
- value = module.eks.kubeconfig_filename + value = module.tidb-operator.eks.kubeconfig_filename } output "default-cluster_tidb-dns" { diff --git a/deploy/aws/pd-userdata.sh b/deploy/aws/pd-userdata.sh deleted file mode 100644 index e2bfc401b7..0000000000 --- a/deploy/aws/pd-userdata.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# set ulimits -cat < /etc/security/limits.d/99-tidb.conf -root soft nofile 1000000 -root hard nofile 1000000 -root soft core unlimited -root soft stack 10240 -EOF -# config docker ulimit -cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service -sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service -sed -i 's/LimitNPROC=infinity/LimitNPROC=1048576/' /etc/systemd/system/docker.service -systemctl daemon-reload -systemctl restart docker - -# format and mount nvme disk -if grep nvme1n1 /etc/fstab; then - echo "disk already mounted" -else - mkfs -t ext4 /dev/nvme1n1 - mkdir -p /mnt/disks/ssd1 - cat <> /etc/fstab -/dev/nvme1n1 /mnt/disks/ssd1 ext4 defaults,nofail,noatime,nodelalloc 0 2 -EOF - mount -a -fi diff --git a/deploy/aws/tidb-cluster/README.md b/deploy/aws/tidb-cluster/README.md new file mode 100644 index 0000000000..4166ae6c78 --- /dev/null +++ b/deploy/aws/tidb-cluster/README.md @@ -0,0 +1,7 @@ +The `tidb-cluster` module for AWS spins up a TiDB cluster in the specified `EKS` cluster. The following resources will be provisioned: + +- An auto scaling group for PD +- An auto scaling group for TiKV +- An auto scaling group for TiDB +- An auto scaling group for Monitoring +- A `TidbCluster` custom resource diff --git a/deploy/aws/tidb-cluster/cluster.tf b/deploy/aws/tidb-cluster/cluster.tf index 67200d437a..643724bebe 100644 --- a/deploy/aws/tidb-cluster/cluster.tf +++ b/deploy/aws/tidb-cluster/cluster.tf @@ -1,74 +1,145 @@ -#resource "local_file" "kubeconfig_for_cleanup" { -# content = var.eks_info.kubeconfig -# filename = "${var.eks_info.kubeconfig_file}_for_${var.cluster_name}_cleanup" -#} +# kubernetes and helm providers rely on EKS, but terraform provider doesn't support depends_on +# follow this link https://github.com/hashicorp/terraform/issues/2430#issuecomment-370685911 +# we have the following hack +resource "local_file" "kubeconfig" { + depends_on = [var.eks] + sensitive_content = var.eks.kubeconfig + filename = var.eks.kubeconfig_filename +} -resource "null_resource" "deploy-cluster" { +provider "helm" { + insecure = true + # service_account = "tiller" + # install_tiller = true # currently this doesn't work, so we install tiller in the local-exec provisioner. 
See https://github.com/terraform-providers/terraform-provider-helm/issues/148 + kubernetes { + config_path = local_file.kubeconfig.filename + } +} + +resource "null_resource" "wait-tiller-ready" { + depends_on = [var.eks] provisioner "local-exec" { - # EKS writes kube_config to path.cwd/kubeconfig_file - # Helm values files are managed in path.cwd working_dir = path.cwd - - command = < kubeconfig_cleanup_${var.cluster_name} -#kubectl delete -n ${var.cluster_name} svc ${var.cluster_name}-pd -#kubectl delete -n ${var.cluster_name} svc ${var.cluster_name}-grafana -#kubectl get pvc -n ${var.cluster_name} -o jsonpath='{.items[*].spec.volumeName}'|fmt -1 | xargs -I {} kubectl patch pv {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}' -#kubectl delete pvc -n ${var.cluster_name} --all -#rm kubeconfig_cleanup_${var.cluster_name} -#EOT -# -# interpreter = var.local_exec_interpreter -# environment = { -# KUBECONFIG = "kubeconfig_cleanup_${var.cluster_name}" -# } -# } -#} diff --git a/deploy/aws/tidb-cluster/data.tf b/deploy/aws/tidb-cluster/data.tf index 3e9b981ec3..0a38de9a30 100644 --- a/deploy/aws/tidb-cluster/data.tf +++ b/deploy/aws/tidb-cluster/data.tf @@ -1,7 +1,7 @@ data "aws_ami" "eks_worker" { filter { name = "name" - values = ["amazon-eks-node-${var.eks_info.version}-${var.worker_ami_name_filter}"] + values = ["amazon-eks-node-${var.eks.cluster_version}-${var.worker_ami_name_filter}"] } most_recent = true @@ -12,12 +12,12 @@ data "aws_ami" "eks_worker" { data "template_file" "userdata" { template = file("${path.module}/templates/userdata.sh.tpl") - count = local.worker_group_count + count = local.worker_group_count vars = { - cluster_name = var.eks_info.name - endpoint = var.eks_info.endpoint - cluster_auth_base64 = var.eks_info.ca + cluster_name = var.eks.cluster_id + endpoint = var.eks.cluster_endpoint + cluster_auth_base64 = var.eks.cluster_certificate_authority_data pre_userdata = lookup( local.tidb_cluster_worker_groups[count.index], "pre_userdata", @@ -43,12 +43,12 @@ data "template_file" "userdata" { data "template_file" "launch_template_userdata" { template = file("${path.module}/templates/userdata.sh.tpl") - count = var.worker_group_launch_template_count + count = var.worker_group_launch_template_count vars = { - cluster_name = var.eks_info.name - endpoint = var.eks_info.endpoint - cluster_auth_base64 = var.eks_info.ca + cluster_name = var.eks.cluster_name + endpoint = var.eks.cluster_endpoint + cluster_auth_base64 = var.eks.cluster_certificate_authority_data pre_userdata = lookup( var.worker_groups_launch_template[count.index], "pre_userdata", @@ -73,13 +73,13 @@ data "template_file" "launch_template_userdata" { } data "external" "tidb_elb" { - depends_on = [null_resource.deploy-cluster] + depends_on = [helm_release.tidb-cluster] working_dir = path.cwd - program = ["bash", "-c", "kubectl --kubeconfig ${var.eks_info.kubeconfig_file} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] + program = ["bash", "-c", "kubectl --kubeconfig ${var.eks.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] } data "external" "monitor_elb" { - depends_on = [null_resource.deploy-cluster] + depends_on = [helm_release.tidb-cluster] working_dir = path.cwd - program = ["bash", "-c", "kubectl --kubeconfig ${var.eks_info.kubeconfig_file} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] + program = ["bash", 
"-c", "kubectl --kubeconfig ${var.eks.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] } diff --git a/deploy/aws/tidb-cluster/local.tf b/deploy/aws/tidb-cluster/local.tf index e89d636979..4cea288184 100644 --- a/deploy/aws/tidb-cluster/local.tf +++ b/deploy/aws/tidb-cluster/local.tf @@ -1,16 +1,7 @@ locals { asg_tags = null_resource.tags_as_list_of_maps.*.triggers - # cluster_security_group_id = coalesce( - # join("", aws_security_group.cluster.*.id), - # var.cluster_security_group_id, - # ) - - # worker_security_group_id = coalesce( - # join("", aws_security_group.workers.*.id), - # var.worker_security_group_id, - # ) - default_iam_role_id = element(concat(var.eks_info.worker_iam_role.*.id, [""]), 0) + default_iam_role_id = var.eks.worker_iam_role_name workers_group_defaults_defaults = { name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used. @@ -145,6 +136,7 @@ locals { var.workers_group_launch_template_defaults, ) + # Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml ebs_optimized = { "c1.medium" = false "c1.xlarge" = true diff --git a/deploy/aws/values/default.yaml b/deploy/aws/tidb-cluster/values/default.yaml similarity index 81% rename from deploy/aws/values/default.yaml rename to deploy/aws/tidb-cluster/values/default.yaml index ec3b964122..e38634ffa0 100644 --- a/deploy/aws/values/default.yaml +++ b/deploy/aws/tidb-cluster/values/default.yaml @@ -1,11 +1,12 @@ # Basic customization for tidb-cluster chart that suits AWS environment -# It is recommended to make a copy of this file and customize for each of your TiDB cluster timezone: UTC pd: logLevel: info + storageClassName: ebs-gp2 tikv: logLevel: info + stroageClassName: local-storage syncLog: true tidb: logLevel: info @@ -17,6 +18,8 @@ tidb: monitor: storage: 100Gi + storageClassName: ebs-gp2 + persistent: true grafana: config: GF_AUTH_ANONYMOUS_ENABLED: "true" diff --git a/deploy/aws/tidb-cluster/variables.tf b/deploy/aws/tidb-cluster/variables.tf index bac21c8cf6..5c6e315232 100644 --- a/deploy/aws/tidb-cluster/variables.tf +++ b/deploy/aws/tidb-cluster/variables.tf @@ -100,16 +100,16 @@ variable "iam_path" { variable "tidb_cluster_chart_version" { description = "tidb-cluster chart version" - default = "v1.0.0-beta.3" + default = "v1.0.0-beta.3" } variable "cluster_name" { - type = string + type = string description = "tidb cluster name" } variable "cluster_version" { - type = string + type = string default = "v3.0.0-rc.2" } @@ -118,59 +118,45 @@ variable "ssh_key_name" { } variable "pd_count" { - type = number + type = number default = 1 } variable "tikv_count" { - type = number + type = number default = 1 } variable "tidb_count" { - type = number + type = number default = 1 } variable "pd_instance_type" { - type = string + type = string default = "c5d.large" } variable "tikv_instance_type" { - type = string + type = string default = "c5d.large" } variable "tidb_instance_type" { - type = string + type = string default = "c5d.large" } variable "monitor_instance_type" { - type = string + type = string default = "c5d.large" } -variable "monitor_storage_class" { - type = string - default = "ebs-gp2" -} - -variable "pd_storage_class" { - type = string - default = "ebs-gp2" -} - -variable "tikv_storage_class" { - type = string - default = "local-storage" -} - variable "override_values" { - type = string + 
type = string + default = "" } -variable "eks_info" { +variable "eks" { description = "eks info" } diff --git a/deploy/aws/tidb-cluster/workers.tf b/deploy/aws/tidb-cluster/workers.tf index 4f00c4ea86..1fab065160 100644 --- a/deploy/aws/tidb-cluster/workers.tf +++ b/deploy/aws/tidb-cluster/workers.tf @@ -1,7 +1,7 @@ # Worker Groups using Launch Configurations resource "aws_autoscaling_group" "workers" { - name_prefix = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" + name_prefix = "${var.eks.cluster_id}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" desired_capacity = lookup( local.tidb_cluster_worker_groups[count.index], "asg_desired_capacity", @@ -17,7 +17,7 @@ resource "aws_autoscaling_group" "workers" { "asg_min_size", local.workers_group_defaults["asg_min_size"], ) - force_delete = false + force_delete = false launch_configuration = element(aws_launch_configuration.workers.*.id, count.index) vpc_zone_identifier = split( ",", @@ -27,19 +27,19 @@ resource "aws_autoscaling_group" "workers" { ), ) protect_from_scale_in = false - count = local.worker_group_count - placement_group = "" # The name of the placement group into which to launch the instances, if any. + count = local.worker_group_count + placement_group = "" # The name of the placement group into which to launch the instances, if any. tags = concat( [ { - key = "Name" - value = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}-eks_asg" + key = "Name" + value = "${var.eks.cluster_id}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}-eks_asg" propagate_at_launch = true }, { - key = "kubernetes.io/cluster/${var.eks_info.name}" - value = "owned" + key = "kubernetes.io/cluster/${var.eks.cluster_id}" + value = "owned" propagate_at_launch = true }, { @@ -48,14 +48,9 @@ resource "aws_autoscaling_group" "workers" { "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"], ) == 1 ? 
"enabled" : "disabled"}" - value = "true" + value = "true" propagate_at_launch = false }, - # { - # "key" = "k8s.io/cluster-autoscaler/${var.eks_info.name}" - # "value" = "" - # "propagate_at_launch" = false - # }, { key = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage" value = "${lookup( @@ -81,13 +76,13 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - name_prefix = "${var.eks_info.name}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" + name_prefix = "${var.eks.cluster_id}-${lookup(local.tidb_cluster_worker_groups[count.index], "name", count.index)}" associate_public_ip_address = lookup( local.tidb_cluster_worker_groups[count.index], "public_ip", local.workers_group_defaults["public_ip"], ) - security_groups = concat([var.eks_info.worker_security_group_id], var.worker_additional_security_group_ids, compact( + security_groups = concat([var.eks.worker_security_group_id], var.worker_additional_security_group_ids, compact( split( ",", lookup( @@ -97,7 +92,7 @@ resource "aws_launch_configuration" "workers" { ), ), )) - iam_instance_profile = element(var.eks_info.worker_iam_instance_profile.*.id, count.index) + iam_instance_profile = element(var.eks.worker_iam_instance_profile_names, count.index) image_id = lookup( local.tidb_cluster_worker_groups[count.index], "ami_id", diff --git a/deploy/aws/tidb-cluster/workers_launch_template.tf b/deploy/aws/tidb-cluster/workers_launch_template.tf index d93554e849..b767d7dce4 100644 --- a/deploy/aws/tidb-cluster/workers_launch_template.tf +++ b/deploy/aws/tidb-cluster/workers_launch_template.tf @@ -1,7 +1,7 @@ # Worker Groups using Launch Templates resource "aws_autoscaling_group" "workers_launch_template" { - name_prefix = "${var.eks_info.name}-${lookup( + name_prefix = "${var.eks.cluster_id}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -111,7 +111,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { [ { key = "Name" - value = "${var.eks_info.name}-${lookup( + value = "${var.eks.cluster_id}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -119,8 +119,8 @@ resource "aws_autoscaling_group" "workers_launch_template" { propagate_at_launch = true }, { - key = "kubernetes.io/cluster/${var.eks_info.name}" - value = "owned" + key = "kubernetes.io/cluster/${var.eks.cluster_id}" + value = "owned" propagate_at_launch = true }, { @@ -129,7 +129,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { "autoscaling_enabled", local.workers_group_launch_template_defaults["autoscaling_enabled"], ) == 1 ? 
"enabled" : "disabled"}" - value = "true" + value = "true" propagate_at_launch = false }, { @@ -165,7 +165,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { } resource "aws_launch_template" "workers_launch_template" { - name_prefix = "${var.eks_info.name}-${lookup( + name_prefix = "${var.eks.cluster_id}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -177,7 +177,7 @@ resource "aws_launch_template" "workers_launch_template" { "public_ip", local.workers_group_launch_template_defaults["public_ip"], ) - security_groups = concat([var.eks_info.worker_security_group_id], var.worker_additional_security_group_ids, compact( + security_groups = concat([var.eks.worker_security_group_id], var.worker_additional_security_group_ids, compact( split( ",", lookup( @@ -288,7 +288,7 @@ resource "aws_launch_template" "workers_launch_template" { } resource "aws_iam_instance_profile" "workers_launch_template" { - name_prefix = var.eks_info.name + name_prefix = var.eks.cluster_id role = lookup( var.worker_groups_launch_template[count.index], "iam_role_id", diff --git a/deploy/aws/tidb-operator/README.md b/deploy/aws/tidb-operator/README.md new file mode 100644 index 0000000000..6b565f945d --- /dev/null +++ b/deploy/aws/tidb-operator/README.md @@ -0,0 +1,7 @@ +The `tidb-operator` module for AWS spins up a control plane for TiDB in Kubernetes. The following resources will be provisioned: + +- An EKS cluster +- A auto scaling group to run the control pods listed below +- TiDB operator, including `tidb-controller-manager` and `tidb-scheduler` +- local-volume-provisioner +- Tiller for Helm \ No newline at end of file diff --git a/deploy/aws/tidb-operator/main.tf b/deploy/aws/tidb-operator/main.tf new file mode 100644 index 0000000000..8789da842f --- /dev/null +++ b/deploy/aws/tidb-operator/main.tf @@ -0,0 +1,86 @@ +module "eks" { + source = "terraform-aws-modules/eks/aws" + + cluster_name = var.eks_name + cluster_version = var.eks_version + vpc_id = var.vpc_id + config_output_path = var.config_output_path + subnets = var.subnets + + tags = { + app = "tidb" + } + + worker_groups = [ + { + name = "${var.eks_name}-control" + key_name = var.ssh_key_name + instance_type = var.default_worker_group_instance_type + public_ip = false + asg_desired_capacity = var.default_worker_group_instance_count + asg_max_size = var.default_worker_group_instance_count + 2 + }, + ] +} + +# kubernetes and helm providers rely on EKS, but terraform provider doesn't support depends_on +# follow this link https://github.com/hashicorp/terraform/issues/2430#issuecomment-370685911 +# we have the following hack +resource "local_file" "kubeconfig" { + depends_on = [module.eks] + sensitive_content = module.eks.kubeconfig + filename = module.eks.kubeconfig_filename +} + +provider "helm" { + insecure = true + # service_account = "tiller" + # install_tiller = true # currently this doesn't work, so we install tiller in the local-exec provisioner. 
See https://github.com/terraform-providers/terraform-provider-helm/issues/148 + kubernetes { + config_path = local_file.kubeconfig.filename + } +} + +resource "null_resource" "setup-env" { + depends_on = [local_file.kubeconfig] + + provisioner "local-exec" { + working_dir = path.module + command = < kube_config.yaml +kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/crd.yaml +kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.0.0-beta.3/manifests/tiller-rbac.yaml +kubectl apply -f manifests/local-volume-provisioner.yaml +kubectl apply -f manifests/gp2-storageclass.yaml +helm init --service-account tiller --upgrade --wait +until helm ls; do + echo "Wait tiller ready" + sleep 1 +done +rm kube_config.yaml +EOS + environment = { + KUBECONFIG = "kube_config.yaml" + } + } +} + +data "helm_repository" "pingcap" { + depends_on = ["null_resource.setup-env"] + name = "pingcap" + url = "http://charts.pingcap.org/" +} + +resource "helm_release" "tidb-operator" { + depends_on = ["null_resource.setup-env"] + + repository = data.helm_repository.pingcap.name + chart = "tidb-operator" + version = var.operator_version + namespace = "tidb-admin" + name = "tidb-operator" + values = [var.operator_helm_values] +} + + + diff --git a/deploy/aws/eks/manifests/gp2-storageclass.yaml b/deploy/aws/tidb-operator/manifests/gp2-storageclass.yaml similarity index 100% rename from deploy/aws/eks/manifests/gp2-storageclass.yaml rename to deploy/aws/tidb-operator/manifests/gp2-storageclass.yaml diff --git a/deploy/aws/eks/manifests/local-volume-provisioner.yaml b/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml similarity index 100% rename from deploy/aws/eks/manifests/local-volume-provisioner.yaml rename to deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml diff --git a/deploy/aws/tidb-operator/outputs.tf b/deploy/aws/tidb-operator/outputs.tf new file mode 100644 index 0000000000..47785e4544 --- /dev/null +++ b/deploy/aws/tidb-operator/outputs.tf @@ -0,0 +1,3 @@ +output "eks" { + value = module.eks +} \ No newline at end of file diff --git a/deploy/aws/tidb-operator/variables.tf b/deploy/aws/tidb-operator/variables.tf new file mode 100644 index 0000000000..1d2aebceb3 --- /dev/null +++ b/deploy/aws/tidb-operator/variables.tf @@ -0,0 +1,53 @@ +variable "eks_name" { + description = "Name of the EKS cluster. Also used as a prefix in names of related resources." + type = string +} + +variable "eks_version" { + description = "Kubernetes version to use for the EKS cluster." + type = string + default = "1.12" +} + +variable "operator_version" { + description = "TiDB Operator version" + type = string + default = "v1.0.0-beta.3" +} + +variable "operator_helm_values" { + description = "Operator helm values" + type = string + default = "" +} + +variable "config_output_path" { + description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Should end in a forward slash `/` ." + type = string + default = "./" +} + +variable "subnets" { + description = "A list of subnets to place the EKS cluster and workers within." + type = list(string) +} + +variable "vpc_id" { + description = "VPC where the cluster and workers will be deployed." 
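One consistency note on the repository and release blocks above: depends_on is written with quoted strings here (["null_resource.setup-env"]) but with bare references elsewhere in this patch (for example depends_on = [var.eks]). Under the 0.12 syntax the rest of the patch uses, the bare form is the idiomatic one; a sketch of the same release with only that change:

resource "helm_release" "tidb-operator" {
  depends_on = [null_resource.setup-env]

  repository = data.helm_repository.pingcap.name
  chart      = "tidb-operator"
  version    = var.operator_version
  namespace  = "tidb-admin"
  name       = "tidb-operator"
  values     = [var.operator_helm_values]
}
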
+ type = string +} + +variable "default_worker_group_instance_type" { + description = "The instance type of default worker groups, this group will be used to run tidb-operator" + default = "m4.large" +} + +variable "default_worker_group_instance_count" { + description = "The instance count of default worker groups, this group will be used to run tidb-operator" + default = 1 +} + +variable "ssh_key_name" { + type = string +} + diff --git a/deploy/aws/tikv-userdata.sh b/deploy/aws/tikv-userdata.sh deleted file mode 100644 index 3187a39fff..0000000000 --- a/deploy/aws/tikv-userdata.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# set system ulimits -cat < /etc/security/limits.d/99-tidb.conf -root soft nofile 1000000 -root hard nofile 1000000 -root soft core unlimited -root soft stack 10240 -EOF -# config docker ulimits -cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service -sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service -sed -i 's/LimitNPROC=infinity/LimitNPROC=1048576/' /etc/systemd/system/docker.service -systemctl daemon-reload -systemctl restart docker - -# format and mount nvme disk -if grep nvme0n1 /etc/fstab; then - echo "disk already mounted" -else - mkfs -t ext4 /dev/nvme0n1 - mkdir -p /mnt/disks/ssd1 - cat <> /etc/fstab -/dev/nvme0n1 /mnt/disks/ssd1 ext4 defaults,nofail,noatime,nodelalloc 0 2 -EOF - mount -a -fi diff --git a/deploy/aws/userdata.sh b/deploy/aws/userdata.sh deleted file mode 100644 index 9c9cc27be5..0000000000 --- a/deploy/aws/userdata.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -# set ulimits -cat < /etc/security/limits.d/99-tidb.conf -root soft nofile 1000000 -root hard nofile 1000000 -root soft core unlimited -root soft stack 10240 -EOF -# config docker ulimit -cp /usr/lib/systemd/system/docker.service /etc/systemd/system/docker.service -sed -i 's/LimitNOFILE=infinity/LimitNOFILE=1048576/' /etc/systemd/system/docker.service -sed -i 's/LimitNPROC=infinity/LimitNPROC=1048576/' /etc/systemd/system/docker.service -systemctl daemon-reload -systemctl restart docker - -# format and mount nvme disk -if grep nvme0n1 /etc/fstab || grep nvme1n1 /etc/fstab; then - echo "disk already mounted" -else - if mkfs -t ext4 /dev/nvme1n1 ; then - - mkdir -p /mnt/disks/ssd1 - cat <> /etc/fstab -/dev/nvme1n1 /mnt/disks/ssd1 ext4 defaults,nofail,noatime,nodelalloc 0 2 -EOF - mount -a - else - mkfs -t ext4 /dev/nvme0n1 - mkdir -p /mnt/disks/ssd1 - cat <> /etc/fstab -/dev/nvme0n1 /mnt/disks/ssd1 ext4 defaults,nofail,noatime,nodelalloc 0 2 -EOF - mount -a - fi -fi - diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index a6bc447687..4364c2d2e3 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -22,6 +22,11 @@ variable "operator_version" { default = "v1.0.0-beta.3" } +variable "operator_values" { + description = "The helm values of TiDB Operator" + default = "" +} + # Please note that this is only for manually created VPCs, deploying multiple EKS # clusters in one VPC is NOT supported now. 
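Given the note above that VPC reuse only applies to manually created VPCs (and that multiple EKS clusters in one VPC are not supported), a minimal terraform.tfvars sketch for pointing the stack at an existing VPC; the IDs below are placeholders, not values taken from this patch:

create_vpc = false
vpc_id     = "vpc-0123456789abcdef0"                                        # placeholder
subnets    = ["subnet-0aaa11112222", "subnet-0bbb33334444", "subnet-0ccc55556666"]  # placeholders
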
variable "create_vpc" { @@ -37,13 +42,13 @@ variable "vpc_cidr" { variable "private_subnets" { description = "VPC private subnets, must be set correctly if create_vpc is true" type = list(string) - default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + default = ["10.0.16.0/20", "10.0.32.0/20", "10.0.48.0/20"] } variable "public_subnets" { description = "VPC public subnets, must be set correctly if create_vpc is true" type = list(string) - default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + default = ["10.0.64.0/20", "10.0.80.0/20", "10.0.96.0/20"] } variable "vpc_id" { @@ -75,7 +80,7 @@ variable "bastion_instance_type" { # For aws tutorials compatiablity variable "default_cluster_version" { - default = "3.0.0" + default = "v3.0.0" } variable "default_cluster_pd_count" { @@ -109,3 +114,4 @@ variable "default_cluster_monitor_instance_type" { variable "default_cluster_name" { default = "my-cluster" } + From a4a49c606a70b74a375f8926f609d894325025e8 Mon Sep 17 00:00:00 2001 From: Aylei Date: Wed, 3 Jul 2019 00:10:36 +0800 Subject: [PATCH 08/11] Address review comments Signed-off-by: Aylei --- deploy/aws/clusters.tf | 42 + .../aws/terraform.tfstate.1562081991.backup | 3113 +++++++++++++++++ deploy/aws/tidb-cluster/cluster.tf | 21 +- deploy/aws/tidb-cluster/local.tf | 4 +- deploy/aws/tidb-operator/main.tf | 7 +- .../manifests/local-volume-provisioner.yaml | 2 + 6 files changed, 3166 insertions(+), 23 deletions(-) create mode 100644 deploy/aws/terraform.tfstate.1562081991.backup diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf index 3049468699..81c2ffe093 100644 --- a/deploy/aws/clusters.tf +++ b/deploy/aws/clusters.tf @@ -1,3 +1,21 @@ +resource "local_file" "kubeconfig" { + depends_on = [module.tidb-operator.eks] + sensitive_content = module.tidb-operator.eks.kubeconfig + filename = module.tidb-operator.eks.kubeconfig_filename +} + +# The helm provider for TiDB clusters must be configured in the top level, otherwise removing clusters will failed due to +# the helm provider configuration is removed too. +provider "helm" { + alias = "eks" + insecure = true + # service_account = "tiller" + install_tiller = false # currently this doesn't work, so we install tiller in the local-exec provisioner. 
See https://github.com/terraform-providers/terraform-provider-helm/issues/148 + kubernetes { + config_path = local_file.kubeconfig.filename + } +} + # TiDB cluster declaration example #module "example-cluster" { # source = "./tidb-cluster" @@ -20,6 +38,9 @@ #} module "default-cluster" { + providers = { + helm = "helm.eks" + } source = "./tidb-cluster" eks = local.default_eks subnets = local.default_subnets @@ -36,3 +57,24 @@ module "default-cluster" { monitor_instance_type = var.default_cluster_monitor_instance_type override_values = file("default-cluster.yaml") } + +module "test-cluster" { + providers = { + helm = "helm.eks" + } + source = "./tidb-cluster" + eks = local.default_eks + subnets = local.default_subnets + + cluster_name = "test-cluster" + cluster_version = var.default_cluster_version + ssh_key_name = module.key-pair.key_name + pd_count = var.default_cluster_pd_count + pd_instance_type = var.default_cluster_pd_instance_type + tikv_count = var.default_cluster_tikv_count + tikv_instance_type = var.default_cluster_tikv_instance_type + tidb_count = var.default_cluster_tidb_count + tidb_instance_type = var.default_cluster_tidb_instance_type + monitor_instance_type = var.default_cluster_monitor_instance_type + override_values = file("default-cluster.yaml") +} diff --git a/deploy/aws/terraform.tfstate.1562081991.backup b/deploy/aws/terraform.tfstate.1562081991.backup new file mode 100644 index 0000000000..fc162942ba --- /dev/null +++ b/deploy/aws/terraform.tfstate.1562081991.backup @@ -0,0 +1,3113 @@ +{ + "version": 4, + "terraform_version": "0.12.3", + "serial": 1906, + "lineage": "0c22b019-b12a-5c6c-6ee4-a1daa6cb8515", + "outputs": { + "eks_endpoint": { + "value": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", + "type": "string" + }, + "kubeconfig_filename": { + "value": "credentials/kubeconfig_my-cluster", + "type": "string" + } + }, + "resources": [ + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_ami", + "name": "eks_worker", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "architecture": "x86_64", + "block_device_mappings": [ + { + "device_name": "/dev/xvda", + "ebs": { + "delete_on_termination": "true", + "encrypted": "false", + "iops": "0", + "snapshot_id": "snap-0b4944fcf084e5907", + "volume_size": "20", + "volume_type": "gp2" + }, + "no_device": "", + "virtual_name": "" + } + ], + "creation_date": "2019-06-15T06:42:59.000Z", + "description": "EKS Kubernetes Worker AMI with AmazonLinux2 image (k8s: 1.12.7, docker:18.06)", + "executable_users": null, + "filter": [ + { + "name": "name", + "values": [ + "amazon-eks-node-1.12-v*" + ] + } + ], + "hypervisor": "xen", + "id": "ami-0f11fd98b02f12a4c", + "image_id": "ami-0f11fd98b02f12a4c", + "image_location": "amazon/amazon-eks-node-1.12-v20190614", + "image_owner_alias": "amazon", + "image_type": "machine", + "kernel_id": null, + "most_recent": true, + "name": "amazon-eks-node-1.12-v20190614", + "name_regex": null, + "owner_id": "602401143452", + "owners": [ + "602401143452" + ], + "platform": null, + "product_codes": [], + "public": true, + "ramdisk_id": null, + "root_device_name": "/dev/xvda", + "root_device_type": "ebs", + "root_snapshot_id": "snap-0b4944fcf084e5907", + "sriov_net_support": "simple", + "state": "available", + "state_reason": { + "code": "UNSET", + "message": "UNSET" + }, + "tags": {}, + "virtualization_type": "hvm" + } + } + ] + }, + { + "mode": "data", + "type": "aws_availability_zones", + "name": 
"available", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "blacklisted_names": null, + "blacklisted_zone_ids": null, + "id": "2019-07-02 15:16:35.907254 +0000 UTC", + "names": [ + "us-west-2a", + "us-west-2b", + "us-west-2c", + "us-west-2d" + ], + "state": null, + "zone_ids": [ + "usw2-az2", + "usw2-az1", + "usw2-az3", + "usw2-az4" + ] + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_caller_identity", + "name": "current", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "account_id": "385595570414", + "arn": "arn:aws:iam::385595570414:user/dengshsuan", + "id": "2019-07-02 15:16:36.89717 +0000 UTC", + "user_id": "AIDAVTR2JPDXCNJDMZJ6H" + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_instance_profile", + "name": "custom_worker_group_iam_instance_profile", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_instance_profile", + "name": "custom_worker_group_launch_template_iam_instance_profile", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_instance_profile", + "name": "custom_worker_group_launch_template_mixed_iam_instance_profile", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_policy_document", + "name": "cluster_assume_role_policy", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "2764486067", + "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"EKSClusterAssumeRole\",\n \"Effect\": \"Allow\",\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"eks.amazonaws.com\"\n }\n }\n ]\n}", + "override_json": null, + "policy_id": null, + "source_json": null, + "statement": [ + { + "actions": [ + "sts:AssumeRole" + ], + "condition": [], + "effect": "Allow", + "not_actions": [], + "not_principals": [], + "not_resources": [], + "principals": [ + { + "identifiers": [ + "eks.amazonaws.com" + ], + "type": "Service" + } + ], + "resources": [], + "sid": "EKSClusterAssumeRole" + } + ], + "version": "2012-10-17" + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_policy_document", + "name": "worker_autoscaling", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "2336810661", + "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"eksWorkerAutoscalingAll\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2:DescribeLaunchTemplateVersions\",\n \"autoscaling:DescribeTags\",\n \"autoscaling:DescribeLaunchConfigurations\",\n \"autoscaling:DescribeAutoScalingInstances\",\n \"autoscaling:DescribeAutoScalingGroups\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"eksWorkerAutoscalingOwn\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"autoscaling:UpdateAutoScalingGroup\",\n \"autoscaling:TerminateInstanceInAutoScalingGroup\",\n \"autoscaling:SetDesiredCapacity\"\n ],\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringEquals\": {\n \"autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled\": \"true\",\n \"autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster\": 
\"owned\"\n }\n }\n }\n ]\n}", + "override_json": null, + "policy_id": null, + "source_json": null, + "statement": [ + { + "actions": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeLaunchTemplateVersions" + ], + "condition": [], + "effect": "Allow", + "not_actions": [], + "not_principals": [], + "not_resources": [], + "principals": [], + "resources": [ + "*" + ], + "sid": "eksWorkerAutoscalingAll" + }, + { + "actions": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup" + ], + "condition": [ + { + "test": "StringEquals", + "values": [ + "owned" + ], + "variable": "autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster" + }, + { + "test": "StringEquals", + "values": [ + "true" + ], + "variable": "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled" + } + ], + "effect": "Allow", + "not_actions": [], + "not_principals": [], + "not_resources": [], + "principals": [], + "resources": [ + "*" + ], + "sid": "eksWorkerAutoscalingOwn" + } + ], + "version": "2012-10-17" + }, + "depends_on": [ + "aws_eks_cluster.this" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_policy_document", + "name": "workers_assume_role_policy", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "3778018924", + "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"EKSWorkerAssumeRole\",\n \"Effect\": \"Allow\",\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n }\n }\n ]\n}", + "override_json": null, + "policy_id": null, + "source_json": null, + "statement": [ + { + "actions": [ + "sts:AssumeRole" + ], + "condition": [], + "effect": "Allow", + "not_actions": [], + "not_principals": [], + "not_resources": [], + "principals": [ + { + "identifiers": [ + "ec2.amazonaws.com" + ], + "type": "Service" + } + ], + "resources": [], + "sid": "EKSWorkerAssumeRole" + } + ], + "version": "2012-10-17" + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_iam_role", + "name": "custom_cluster_iam_role", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "aws_region", + "name": "current", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "current": null, + "description": "US West (Oregon)", + "endpoint": "ec2.us-west-2.amazonaws.com", + "id": "us-west-2", + "name": "us-west-2" + } + } + ] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "apigw", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "cloudtrail", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "dynamodb", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ec2", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": 
"aws_vpc_endpoint_service", + "name": "ec2messages", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ecr_api", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ecr_dkr", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ecs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ecs_agent", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ecs_telemetry", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "elasticloadbalancing", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "events", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "kms", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "logs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "monitoring", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "s3", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "sns", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "sqs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ssm", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "data", + "type": "aws_vpc_endpoint_service", + "name": "ssmmessages", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "data", + "type": "external", + "name": "monitor_elb", + "provider": "provider.external", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "data", + "type": "external", + "name": "monitor_elb", + "provider": "provider.external", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "data", + "type": "external", + "name": "tidb_elb", + "provider": "provider.external", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "data", + "type": "external", + "name": "tidb_elb", + "provider": "provider.external", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "data", + "type": "helm_repository", + "name": "pingcap", + 
"provider": "provider.helm.eks", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "data", + "type": "helm_repository", + "name": "pingcap", + "provider": "provider.helm.eks", + "instances": [] + }, + { + "module": "module.tidb-operator", + "mode": "data", + "type": "helm_repository", + "name": "pingcap", + "provider": "module.tidb-operator.provider.helm", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "aws_authenticator_env_variables", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "config_map_aws_auth", + "provider": "provider.template", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "e30bd200646bbb687a3629838542cd2efd20aa6f4fe3dca6cc606fa4895ba608", + "rendered": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", + "template": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n${worker_role_arn}\n${map_roles}\n mapUsers: |\n${map_users}\n mapAccounts: |\n${map_accounts}\n", + "vars": { + "map_accounts": "", + "map_roles": "", + "map_users": "", + "worker_role_arn": " - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n" + } + }, + "depends_on": [ + "data.template_file.launch_template_mixed_worker_role_arns", + "data.template_file.launch_template_worker_role_arns", + "data.template_file.map_accounts", + "data.template_file.map_roles", + "data.template_file.map_users", + "data.template_file.worker_role_arns" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "kubeconfig", + "provider": "provider.template", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "271318f91812c79d01834167167c38fb85a7b7fe82bc51b092b967bb138dc5a4", + "rendered": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n", + "template": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: ${endpoint}\n certificate-authority-data: ${cluster_auth_base64}\n name: ${kubeconfig_name}\n\ncontexts:\n- context:\n cluster: ${kubeconfig_name}\n user: ${kubeconfig_name}\n name: ${kubeconfig_name}\n\ncurrent-context: ${kubeconfig_name}\n\nusers:\n- name: ${kubeconfig_name}\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: ${aws_authenticator_command}\n args:\n${aws_authenticator_command_args}\n${aws_authenticator_additional_args}\n${aws_authenticator_env_variables}\n", + "vars": { + "aws_authenticator_additional_args": "", + "aws_authenticator_command": "aws-iam-authenticator", + "aws_authenticator_command_args": " - \"token\"\n - \"-i\"\n - \"my-cluster\"", + "aws_authenticator_env_variables": "", + "cluster_auth_base64": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", + "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", + "kubeconfig_name": "eks_my-cluster", + "region": "us-west-2" + } + }, + "depends_on": [ + "aws_eks_cluster.this", + "data.aws_region.current", + "data.template_file.aws_authenticator_env_variables" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "launch_template_mixed_worker_role_arns", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "data", + "type": "template_file", + "name": "launch_template_userdata", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "data", + "type": "template_file", + "name": "launch_template_userdata", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "launch_template_userdata", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "launch_template_worker_role_arns", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "map_accounts", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "map_roles", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "map_users", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "data", + "type": "template_file", + "name": "userdata", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": 
"data", + "type": "template_file", + "name": "userdata", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "userdata", + "each": "list", + "provider": "provider.template", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "filename": null, + "id": "4a99c387d5c0bc4b5f2dbea53b336e9dd3bd912664fab7c8261b0394938c1ce9", + "rendered": "#!/bin/bash -xe\n\n# Allow user supplied pre userdata code\n\n\n# Bootstrap and join the cluster\n/etc/eks/bootstrap.sh --b64-cluster-ca 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=' --apiserver-endpoint 'https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com' --kubelet-extra-args '' 'my-cluster'\n\n# Allow user supplied userdata code\n\n", + "template": "#!/bin/bash -xe\n\n# Allow user supplied pre userdata code\n${pre_userdata}\n\n# Bootstrap and join the cluster\n/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}'\n\n# Allow user supplied userdata code\n${additional_userdata}\n", + "vars": { + "additional_userdata": "", + "bootstrap_extra_args": "", + "cluster_auth_base64": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", + "cluster_name": "my-cluster", + "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", + "kubelet_extra_args": "", + "pre_userdata": "" + } + }, + "depends_on": [ + "aws_eks_cluster.this" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "worker_role_arns", + "each": "list", + "provider": "provider.template", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "filename": null, + "id": "43e544b6f14ee17b5fe2c31f073e3ea86423a3b01f4f5733a32fc3e21b0326d2", + "rendered": " - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n", + "template": " - rolearn: ${worker_role_arn}\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n", + "vars": { + "worker_role_arn": "arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005" + } + }, + "depends_on": [ + "aws_iam_instance_profile.workers", + "data.aws_caller_identity.current", + "data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "data", + "type": "template_file", + "name": "workers_launch_template_mixed", + "each": "list", + "provider": "provider.template", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "arn": 
"arn:aws:autoscaling:us-west-2:385595570414:autoScalingGroup:af99cd65-847c-402b-8d7a-834df71b543b:autoScalingGroupName/my-cluster-my-cluster-control2019070214202285200000000e", + "availability_zones": [ + "us-west-2a", + "us-west-2b", + "us-west-2c" + ], + "default_cooldown": 300, + "desired_capacity": 1, + "enabled_metrics": [], + "force_delete": false, + "health_check_grace_period": 300, + "health_check_type": "EC2", + "id": "my-cluster-my-cluster-control2019070214202285200000000e", + "initial_lifecycle_hook": [], + "launch_configuration": "my-cluster-my-cluster-control2019070214201744850000000d", + "launch_template": [], + "load_balancers": [], + "max_size": 3, + "metrics_granularity": "1Minute", + "min_elb_capacity": null, + "min_size": 1, + "mixed_instances_policy": [], + "name": "my-cluster-my-cluster-control2019070214202285200000000e", + "name_prefix": "my-cluster-my-cluster-control", + "placement_group": "", + "protect_from_scale_in": false, + "service_linked_role_arn": "arn:aws:iam::385595570414:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", + "suspended_processes": [ + "AZRebalance" + ], + "tag": [], + "tags": [ + { + "key": "Name", + "propagate_at_launch": "true", + "value": "my-cluster-my-cluster-control-eks_asg" + }, + { + "key": "kubernetes.io/cluster/my-cluster", + "propagate_at_launch": "true", + "value": "owned" + }, + { + "key": "k8s.io/cluster-autoscaler/disabled", + "propagate_at_launch": "false", + "value": "true" + }, + { + "key": "k8s.io/cluster-autoscaler/my-cluster", + "propagate_at_launch": "false", + "value": "my-cluster" + }, + { + "key": "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage", + "propagate_at_launch": "false", + "value": "100Gi" + }, + { + "key": "app", + "propagate_at_launch": "true", + "value": "tidb" + } + ], + "target_group_arns": [], + "termination_policies": [], + "timeouts": null, + "vpc_zone_identifier": [ + "subnet-01c62f5324fe05605", + "subnet-0869b629c725c799c", + "subnet-08b8c355f30ed58ba" + ], + "wait_for_capacity_timeout": "10m", + "wait_for_elb_capacity": null + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDB9fQ==", + "depends_on": [ + "aws_eks_cluster.this", + "aws_launch_configuration.workers" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_autoscaling_group", + "name": "workers_launch_template_mixed", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_cloudwatch_log_group", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_db_subnet_group", + "name": "database", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": 
"module.vpc", + "mode": "managed", + "type": "aws_default_network_acl", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_default_vpc", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_eip", + "name": "nat", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_eks_cluster", + "name": "this", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "arn": "arn:aws:eks:us-west-2:385595570414:cluster/my-cluster", + "certificate_authority": [ + { + "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" + } + ], + "created_at": "2019-07-02 14:09:27 +0000 UTC", + "enabled_cluster_log_types": [], + "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", + "id": "my-cluster", + "name": "my-cluster", + "platform_version": "eks.2", + "role_arn": "arn:aws:iam::385595570414:role/my-cluster20190702140850744000000001", + "timeouts": { + "create": "15m", + "delete": "15m", + "update": null + }, + "version": "1.12", + "vpc_config": [ + { + "endpoint_private_access": false, + "endpoint_public_access": true, + "security_group_ids": [ + "sg-049b5a71010e17d3f" + ], + "subnet_ids": [ + "subnet-01c62f5324fe05605", + "subnet-0869b629c725c799c", + "subnet-08b8c355f30ed58ba" + ], + "vpc_id": "vpc-045ead8290dd948d4" + } + ] + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo5MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwLCJ1cGRhdGUiOjM2MDAwMDAwMDAwMDB9fQ==", + "depends_on": [ + "aws_cloudwatch_log_group.this", + "aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy", + "aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy" + ] + } + ] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_elasticache_subnet_group", + "name": "elasticache", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": 
"managed", + "type": "aws_iam_instance_profile", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "arn": "arn:aws:iam::385595570414:instance-profile/my-cluster20190702141952556900000008", + "create_date": "2019-07-02T14:19:53Z", + "id": "my-cluster20190702141952556900000008", + "name": "my-cluster20190702141952556900000008", + "name_prefix": "my-cluster", + "path": "/", + "role": "my-cluster20190702141950357100000005", + "roles": [ + "my-cluster20190702141950357100000005" + ], + "unique_id": "AIPAVTR2JPDXMT2SSY5OY" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_eks_cluster.this" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "aws_iam_instance_profile", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "aws_iam_instance_profile", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_instance_profile", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_instance_profile", + "name": "workers_launch_template_mixed", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_policy", + "name": "worker_autoscaling", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "arn": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", + "description": "EKS worker node autoscaling policy for cluster my-cluster", + "id": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", + "name": "eks-worker-autoscaling-my-cluster20190702141950399500000007", + "name_prefix": "eks-worker-autoscaling-my-cluster", + "path": "/", + "policy": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"eksWorkerAutoscalingAll\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2:DescribeLaunchTemplateVersions\",\n \"autoscaling:DescribeTags\",\n \"autoscaling:DescribeLaunchConfigurations\",\n \"autoscaling:DescribeAutoScalingInstances\",\n \"autoscaling:DescribeAutoScalingGroups\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"eksWorkerAutoscalingOwn\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"autoscaling:UpdateAutoScalingGroup\",\n \"autoscaling:TerminateInstanceInAutoScalingGroup\",\n \"autoscaling:SetDesiredCapacity\"\n ],\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringEquals\": {\n \"autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled\": \"true\",\n \"autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster\": \"owned\"\n }\n }\n }\n ]\n}" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_eks_cluster.this", + "data.aws_iam_policy_document.worker_autoscaling" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role", + "name": "cluster", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "arn": 
"arn:aws:iam::385595570414:role/my-cluster20190702140850744000000001", + "assume_role_policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"EKSClusterAssumeRole\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"eks.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}", + "create_date": "2019-07-02T14:08:52Z", + "description": "", + "force_detach_policies": true, + "id": "my-cluster20190702140850744000000001", + "max_session_duration": 3600, + "name": "my-cluster20190702140850744000000001", + "name_prefix": "my-cluster", + "path": "/", + "permissions_boundary": null, + "tags": { + "app": "tidb" + }, + "unique_id": "AROAVTR2JPDXFAOM7QD2V" + }, + "private": "bnVsbA==", + "depends_on": [ + "data.aws_iam_policy_document.cluster_assume_role_policy" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "arn": "arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005", + "assume_role_policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"EKSWorkerAssumeRole\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}", + "create_date": "2019-07-02T14:19:51Z", + "description": "", + "force_detach_policies": true, + "id": "my-cluster20190702141950357100000005", + "max_session_duration": 3600, + "name": "my-cluster20190702141950357100000005", + "name_prefix": "my-cluster", + "path": "/", + "permissions_boundary": null, + "tags": {}, + "unique_id": "AROAVTR2JPDXOBSTW46SB" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_eks_cluster.this", + "data.aws_iam_policy_document.workers_assume_role_policy" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "cluster_AmazonEKSClusterPolicy", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702140850744000000001-20190702140855882500000002", + "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "role": "my-cluster20190702140850744000000001" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_role.cluster[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "cluster_AmazonEKSServicePolicy", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702140850744000000001-20190702140856941000000003", + "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", + "role": "my-cluster20190702140850744000000001" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_role.cluster[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "workers_AmazonEC2ContainerRegistryReadOnly", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702141950357100000005-2019070214195450610000000b", + "policy_arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "role": "my-cluster20190702141950357100000005" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_role.workers[0]" + ] + } + ] + 
}, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "workers_AmazonEKSWorkerNodePolicy", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702141950357100000005-2019070214195361280000000a", + "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "role": "my-cluster20190702141950357100000005" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_role.workers[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "workers_AmazonEKS_CNI_Policy", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702141950357100000005-20190702141953520500000009", + "policy_arn": "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "role": "my-cluster20190702141950357100000005" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_role.workers[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "workers_additional_policies", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_iam_role_policy_attachment", + "name": "workers_autoscaling", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "my-cluster20190702141950357100000005-2019070214200167710000000c", + "policy_arn": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", + "role": "my-cluster20190702141950357100000005" + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_iam_policy.worker_autoscaling[0]", + "aws_iam_role.workers[0]" + ] + } + ] + }, + { + "module": "module.ec2", + "mode": "managed", + "type": "aws_instance", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.ec2", + "mode": "managed", + "type": "aws_instance", + "name": "this_t2", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_internet_gateway", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.key-pair", + "mode": "managed", + "type": "aws_key_pair", + "name": "generated", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 1, + "attributes": { + "fingerprint": "34:26:51:9e:09:d0:63:28:eb:5e:87:b6:f3:ea:24:bd", + "id": "my-cluster", + "key_name": "my-cluster", + "key_name_prefix": null, + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyw+DCyCDjwxIeR6joCJIplaCryriU7rX5jmOqi2LHGCvraiCthfZw3EQBc9ktgITXZ6OCLcR3mO6gub5mEzkRZQFUkF4DkiUxB/8emuqeJgS7ngq5XvDARqqCNT1N2e/cOp+LIeM4vCpfbZQcZzOzp87TSGF1x8OkyqDmqCW1wk0c98QbZiH2BJaLxO9hmrfVTAeUQPiZftn260Lqm0yENYuids0NpWD4usulE36ssRNxyxaNMPRA07vVa1IhG8GzMwKk5YACRJhdtZQEXFOFo0jNuFrR7FX+Dg8UN3YvxUATJIMr2UeMzWIjoRrXBd1LEH6JWxy1V2owrWPi4E5P" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==", + "depends_on": [ + "tls_private_key.generated" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "aws_launch_configuration", + "name": "workers", + "each": "list", + "provider": 
"provider.aws", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "aws_launch_configuration", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_launch_configuration", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "associate_public_ip_address": false, + "ebs_block_device": [], + "ebs_optimized": false, + "enable_monitoring": true, + "ephemeral_block_device": [], + "iam_instance_profile": "my-cluster20190702141952556900000008", + "id": "my-cluster-my-cluster-control2019070214201744850000000d", + "image_id": "ami-0f11fd98b02f12a4c", + "instance_type": "t2.xlarge", + "key_name": "my-cluster", + "name": "my-cluster-my-cluster-control2019070214201744850000000d", + "name_prefix": "my-cluster-my-cluster-control", + "placement_tenancy": null, + "root_block_device": [ + { + "delete_on_termination": true, + "iops": 0, + "volume_size": 100, + "volume_type": "gp2" + } + ], + "security_groups": [ + "sg-0901b76a8e3e7055e" + ], + "spot_price": "", + "user_data": null, + "user_data_base64": "IyEvYmluL2Jhc2ggLXhlCgojIEFsbG93IHVzZXIgc3VwcGxpZWQgcHJlIHVzZXJkYXRhIGNvZGUKCgojIEJvb3RzdHJhcCBhbmQgam9pbiB0aGUgY2x1c3RlcgovZXRjL2Vrcy9ib290c3RyYXAuc2ggLS1iNjQtY2x1c3Rlci1jYSAnTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTjVSRU5EUVdKRFowRjNTVUpCWjBsQ1FVUkJUa0puYTNGb2EybEhPWGN3UWtGUmMwWkJSRUZXVFZKTmQwVlJXVVJXVVZGRVJYZHdjbVJYU213S1kyMDFiR1JIVm5wTlFqUllSRlJGTlUxRVkzZE5ha1V3VFZSWk1VOVdiMWhFVkVrMVRVUlplVTlVUlRCTlZGa3hUMVp2ZDBaVVJWUk5Ra1ZIUVRGVlJRcEJlRTFMWVROV2FWcFlTblZhV0ZKc1kzcERRMEZUU1hkRVVWbEtTMjlhU1doMlkwNUJVVVZDUWxGQlJHZG5SVkJCUkVORFFWRnZRMmRuUlVKQlMwSjRDbVZLZGxFelNsZEZia1pxY2tGQlJWRlZUREZVUzBGVFJVUnhhVXQ1UVhsd2QySmxOMFV3VkU1alFuQnFSMWt2VXpGVFpuUlpNbFpFT0d0UWJuRm5jak1LT0hGdVlYbDNkMWN2T0VSWWRqRXZOMHQ1T0hkc1UyNUdUMnh1VGpaWldrNXZVbTlqUkZFMlYxSk5aMlV6YzNwNU1GaDNMeTl5V1c1Q1FtOHhlaXRGWlFwSE5uTlZOemxYUWpsNEsxVktkV0pOYVdKNU5XOVlZWE5xVVVOdFpGVkhPV2t6VmtsTVFUSkhiMXBHVG5OMGQxVk9VVGxUU1VoYVJscElXa2x0VkZSckNtZHFjVTExVDNkeFNHWkphM1ZETmxORFJFVXdUbmh0U1VWcldVOHdiMUo2V1ZkSVNDdDZPRXN4UWpGaE1HRjJjMXA0TnpKR1pqSjFVRTVvVGxFMVJVMEtReTk1ZGpGbUwzZGFUREUyUkhKQk1VUlVURkpTV0ZsbmJYUXlMMmx2UVZSV1JGcFFkV2hKUXpreldITlpNSEkxYmxZcmQyeFdRbWwwTlZadllUSjVOQXA2ZFhwaGRXaEVNMWd6TVRCclpWQmhVWFZqUTBGM1JVRkJZVTFxVFVORmQwUm5XVVJXVWpCUVFWRklMMEpCVVVSQlowdHJUVUU0UjBFeFZXUkZkMFZDQ2k5M1VVWk5RVTFDUVdZNGQwUlJXVXBMYjFwSmFIWmpUa0ZSUlV4Q1VVRkVaMmRGUWtGSlREQk9iRzFDUnlzcmRqbDNiamQwZUhwaWFHeG9lV1ZPTTJNS2JVRTNOVE5VU1RCUk9XbEdlbkozU0VSa1YzSjNUbk1yYlc5WFN6bFJiVUpoT0cxWVRtaHlaVXBJWVhwUFVXdDBjVU5IUWtNNE9GWmtlWGhIWkU5UFp3bzJTV1ZRU25BMk5XODNZelJZU1hkVFkxbGthaTl3V1hwTGVIWlhhUzgyWmk5YWEwMDBUekF4UVVwVVprVmFXV1ZFYm1wbWFYWmthR3R6ZGtkMlZWRnVDakk1VERabVFrZzJkWGg1YTNrMGNWSnJhVzlrVGtwcE1IVklhemRhTlVWWU5TdE1jaXN6TnpoQmVIQktPRUpWTUZwd2JrMXNOVmhOVUZSMWJ6RTVWeXNLUjNCNVVFbFZRWEJVYW10UWRDOU5lbVE0Y3pkeVdESm1RV1JhWVc5dlVVTnZORXc0ZGtwR1dIZHVkREJIVVVwekwzYzVTMnRpWVhjeFkwMHhjWFJaVFFwRmEzaERRbm81TTBsMU9WaHlVRlZvTUc5dGRYVnlSMmQ1YWtSTGRVSm9Ua1pDYnk4d09HTXZabXg2VVZsU1ZHUkNSbXBYTlRZNWMzVlZRVDBLTFMwdExTMUZUa1FnUTBWU1ZFbEdTVU5CVkVVdExTMHRMUW89JyAtLWFwaXNlcnZlci1lbmRwb2ludCAnaHR0cHM6Ly8yQjA5RUQyMzI5REY0MDEyQ0MwQTY3MDVCRkIxNDk2NS55bDQudXMtd2VzdC0yLmVrcy5hbWF6b25hd3MuY29tJyAgLS1rdWJlbGV0LWV4dHJhLWFyZ3MgJycgJ215LWNsdXN0ZXInCgojIEFsbG93IHVzZXIgc3VwcGxpZWQgdXNlcmRhdGEgY29kZQoK", + "vpc_classic_link_id": "", + 
"vpc_classic_link_security_groups": [] + }, + "private": "bnVsbA==", + "depends_on": [ + "aws_eks_cluster.this", + "aws_iam_instance_profile.workers", + "data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile", + "data.template_file.userdata" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "aws_launch_template", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "aws_launch_template", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_launch_template", + "name": "workers_launch_template", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_launch_template", + "name": "workers_launch_template_mixed", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_nat_gateway", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "database", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "elasticache", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "intra", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "private", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl", + "name": "redshift", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "database_inbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "database_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "elasticache_inbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "elasticache_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "intra_inbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "intra_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "private_inbound", + 
"each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "private_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "public_inbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "public_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "redshift_inbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_network_acl_rule", + "name": "redshift_outbound", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_redshift_subnet_group", + "name": "redshift", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route", + "name": "database_internet_gateway", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route", + "name": "database_nat_gateway", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route", + "name": "private_nat_gateway", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route", + "name": "public_internet_gateway", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "database", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "elasticache", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "intra", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "private", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table", + "name": "redshift", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "database", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "elasticache", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "intra", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": 
"aws_route_table_association", + "name": "private", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "redshift", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_route_table_association", + "name": "redshift_public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group", + "name": "cluster", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:security-group/sg-049b5a71010e17d3f", + "description": "EKS cluster security group.", + "egress": [ + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "Allow cluster egress access to the Internet.", + "from_port": 0, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_groups": [], + "self": false, + "to_port": 0 + } + ], + "id": "sg-049b5a71010e17d3f", + "ingress": [ + { + "cidr_blocks": [], + "description": "Allow pods to communicate with the EKS cluster API.", + "from_port": 443, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [ + "sg-0901b76a8e3e7055e" + ], + "self": false, + "to_port": 443 + } + ], + "name": "my-cluster20190702140914437300000004", + "name_prefix": "my-cluster", + "owner_id": "385595570414", + "revoke_rules_on_delete": false, + "tags": { + "Name": "my-cluster-eks_cluster_sg", + "app": "tidb" + }, + "timeouts": null, + "vpc_id": "vpc-045ead8290dd948d4" + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=" + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group", + "name": "workers", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:security-group/sg-0901b76a8e3e7055e", + "description": "Security group for all nodes in the cluster.", + "egress": [ + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "Allow nodes all egress to the Internet.", + "from_port": 0, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_groups": [], + "self": false, + "to_port": 0 + } + ], + "id": "sg-0901b76a8e3e7055e", + "ingress": [ + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "kubernetes.io/rule/nlb/client=a0d6855d89cd511e9a9c20a756cd94eb", + "from_port": 31362, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31362 + }, + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "kubernetes.io/rule/nlb/client=a0d6855d89cd511e9a9c20a756cd94eb", + "from_port": 31945, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31945 + }, + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "kubernetes.io/rule/nlb/client=aff4a46c59cd411e9a9c20a756cd94eb", + 
"from_port": 31303, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31303 + }, + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "kubernetes.io/rule/nlb/client=aff4a46c59cd411e9a9c20a756cd94eb", + "from_port": 31366, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31366 + }, + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "kubernetes.io/rule/nlb/mtu=aff4a46c59cd411e9a9c20a756cd94eb", + "from_port": 3, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "icmp", + "security_groups": [], + "self": false, + "to_port": 4 + }, + { + "cidr_blocks": [ + "10.0.0.0/16" + ], + "description": "kubernetes.io/rule/nlb/health=a0d6855d89cd511e9a9c20a756cd94eb", + "from_port": 31362, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31362 + }, + { + "cidr_blocks": [ + "10.0.0.0/16" + ], + "description": "kubernetes.io/rule/nlb/health=a0d6855d89cd511e9a9c20a756cd94eb", + "from_port": 31945, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31945 + }, + { + "cidr_blocks": [ + "10.0.0.0/16" + ], + "description": "kubernetes.io/rule/nlb/health=aff4a46c59cd411e9a9c20a756cd94eb", + "from_port": 31303, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31303 + }, + { + "cidr_blocks": [ + "10.0.0.0/16" + ], + "description": "kubernetes.io/rule/nlb/health=aff4a46c59cd411e9a9c20a756cd94eb", + "from_port": 31366, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 31366 + }, + { + "cidr_blocks": [], + "description": "", + "from_port": 0, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_groups": [ + "sg-02c1a918ddf291a13", + "sg-099c00ee2d9fe36ef" + ], + "self": false, + "to_port": 0 + }, + { + "cidr_blocks": [], + "description": "Allow node to communicate with each other.", + "from_port": 0, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_groups": [], + "self": true, + "to_port": 0 + }, + { + "cidr_blocks": [], + "description": "Allow pods running extension API servers on port 443 to receive communication from cluster control plane.", + "from_port": 443, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [ + "sg-049b5a71010e17d3f" + ], + "self": false, + "to_port": 443 + }, + { + "cidr_blocks": [], + "description": "Allow workers pods to receive communication from the cluster control plane.", + "from_port": 1025, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [ + "sg-049b5a71010e17d3f" + ], + "self": false, + "to_port": 65535 + } + ], + "name": "my-cluster20190702141950359500000006", + "name_prefix": "my-cluster", + "owner_id": "385595570414", + "revoke_rules_on_delete": false, + "tags": { + "Name": "my-cluster-eks_worker_sg", + "app": "tidb", + "kubernetes.io/cluster/my-cluster": "owned" + }, + "timeouts": null, + "vpc_id": "vpc-045ead8290dd948d4" + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", + "depends_on": [ + "aws_eks_cluster.this" + ] + } + ] + }, + { + 
"module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "cluster_egress_internet", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "Allow cluster egress access to the Internet.", + "from_port": 0, + "id": "sgrule-3629728231", + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_group_id": "sg-049b5a71010e17d3f", + "self": false, + "source_security_group_id": null, + "to_port": 0, + "type": "egress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.cluster[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "cluster_https_worker_ingress", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [], + "description": "Allow pods to communicate with the EKS cluster API.", + "from_port": 443, + "id": "sgrule-105930882", + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_group_id": "sg-049b5a71010e17d3f", + "self": false, + "source_security_group_id": "sg-0901b76a8e3e7055e", + "to_port": 443, + "type": "ingress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.cluster[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "workers_egress_internet", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "Allow nodes all egress to the Internet.", + "from_port": 0, + "id": "sgrule-2908448787", + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_group_id": "sg-0901b76a8e3e7055e", + "self": false, + "source_security_group_id": null, + "to_port": 0, + "type": "egress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.workers[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "workers_ingress_cluster", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [], + "description": "Allow workers pods to receive communication from the cluster control plane.", + "from_port": 1025, + "id": "sgrule-3437507016", + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_group_id": "sg-0901b76a8e3e7055e", + "self": false, + "source_security_group_id": "sg-049b5a71010e17d3f", + "to_port": 65535, + "type": "ingress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.workers[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "workers_ingress_cluster_https", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [], + "description": "Allow pods running extension API servers on port 443 to receive communication from cluster control plane.", + "from_port": 443, + "id": "sgrule-1750768348", + 
"ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_group_id": "sg-0901b76a8e3e7055e", + "self": false, + "source_security_group_id": "sg-049b5a71010e17d3f", + "to_port": 443, + "type": "ingress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.workers[0]" + ] + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "workers_ingress_cluster_kubelet", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "aws_security_group_rule", + "name": "workers_ingress_self", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 2, + "attributes": { + "cidr_blocks": [], + "description": "Allow node to communicate with each other.", + "from_port": 0, + "id": "sgrule-785489295", + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_group_id": "sg-0901b76a8e3e7055e", + "self": false, + "source_security_group_id": "sg-0901b76a8e3e7055e", + "to_port": 0, + "type": "ingress" + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", + "depends_on": [ + "aws_security_group.workers[0]" + ] + } + ] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "database", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "elasticache", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "intra", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "private", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-08b8c355f30ed58ba", + "assign_ipv6_address_on_creation": false, + "availability_zone": "us-west-2a", + "availability_zone_id": "usw2-az2", + "cidr_block": "10.0.16.0/20", + "id": "subnet-08b8c355f30ed58ba", + "ipv6_cidr_block": "", + "ipv6_cidr_block_association_id": "", + "map_public_ip_on_launch": false, + "owner_id": "385595570414", + "tags": { + "Name": "my-cluster-private-us-west-2a", + "kubernetes.io/cluster/my-cluster": "shared" + }, + "timeouts": null, + "vpc_id": "vpc-045ead8290dd948d4" + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" + }, + { + "index_key": 1, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-0869b629c725c799c", + "assign_ipv6_address_on_creation": false, + "availability_zone": "us-west-2b", + "availability_zone_id": "usw2-az1", + "cidr_block": "10.0.32.0/20", + "id": "subnet-0869b629c725c799c", + "ipv6_cidr_block": "", + "ipv6_cidr_block_association_id": "", + "map_public_ip_on_launch": false, + "owner_id": "385595570414", + "tags": { + "Name": "my-cluster-private-us-west-2b", + "kubernetes.io/cluster/my-cluster": "shared" + }, + "timeouts": null, + "vpc_id": "vpc-045ead8290dd948d4" + }, + "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" + }, + { + "index_key": 2, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-01c62f5324fe05605", + "assign_ipv6_address_on_creation": false, + "availability_zone": "us-west-2c", + "availability_zone_id": "usw2-az3", + "cidr_block": "10.0.48.0/20", + "id": "subnet-01c62f5324fe05605", + "ipv6_cidr_block": "", + "ipv6_cidr_block_association_id": "", + "map_public_ip_on_launch": false, + "owner_id": "385595570414", + "tags": { + "Name": "my-cluster-private-us-west-2c", + "kubernetes.io/cluster/my-cluster": "shared" + }, + "timeouts": null, + "vpc_id": "vpc-045ead8290dd948d4" + }, + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" + } + ] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_subnet", + "name": "redshift", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [ + { + "index_key": 0, + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:us-west-2:385595570414:vpc/vpc-045ead8290dd948d4", + "assign_generated_ipv6_cidr_block": false, + "cidr_block": "10.0.0.0/16", + "default_network_acl_id": "acl-01d834214145117e1", + "default_route_table_id": "rtb-00a392b6b0f316355", + "default_security_group_id": "sg-0c9dd6ab66e0d2227", + "dhcp_options_id": "dopt-0062aa79", + "enable_classiclink": false, + "enable_classiclink_dns_support": false, + "enable_dns_hostnames": false, + "enable_dns_support": true, + "id": "vpc-045ead8290dd948d4", + "instance_tenancy": "default", + "ipv6_association_id": "", + "ipv6_cidr_block": "", + "main_route_table_id": "rtb-00a392b6b0f316355", + "owner_id": "385595570414", + "tags": { + "Name": "my-cluster", + "kubernetes.io/cluster/my-cluster": "shared" + } + }, + "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==" + } + ] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_dhcp_options", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_dhcp_options_association", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "apigw", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "cloudtrail", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "dynamodb", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ec2", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ec2messages", + "each": "list", + "provider": "provider.aws", + 
"instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ecr_api", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ecr_dkr", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ecs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ecs_agent", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ecs_telemetry", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "elasticloadbalancing", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "events", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "kms", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "logs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "monitoring", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "s3", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "sns", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "sqs", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ssm", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint", + "name": "ssmmessages", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": "intra_dynamodb", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": "intra_s3", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": "private_dynamodb", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": "private_s3", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": 
"public_dynamodb", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_endpoint_route_table_association", + "name": "public_s3", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpc_ipv4_cidr_block_association", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpn_gateway", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpn_gateway_attachment", + "name": "this", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpn_gateway_route_propagation", + "name": "private", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.vpc", + "mode": "managed", + "type": "aws_vpn_gateway_route_propagation", + "name": "public", + "each": "list", + "provider": "provider.aws", + "instances": [] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "helm_release", + "name": "tidb-cluster", + "provider": "provider.helm.eks", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "helm_release", + "name": "tidb-cluster", + "provider": "provider.helm.eks", + "instances": [] + }, + { + "module": "module.tidb-operator", + "mode": "managed", + "type": "helm_release", + "name": "tidb-operator", + "provider": "module.tidb-operator.provider.helm", + "instances": [ + { + "schema_version": 0, + "attributes": { + "chart": "tidb-operator", + "devel": null, + "disable_webhooks": false, + "force_update": false, + "id": "tidb-operator", + "keyring": null, + "metadata": [ + { + "chart": "tidb-operator", + "name": "tidb-operator", + "namespace": "tidb-admin", + "revision": 1, + "values": "{}\n", + "version": "v1.0.0-beta.3" + } + ], + "name": "tidb-operator", + "namespace": "tidb-admin", + "recreate_pods": false, + "repository": "pingcap", + "reuse": false, + "reuse_values": false, + "set": [], + "set_sensitive": [], + "set_string": [], + "status": "DEPLOYED", + "timeout": 300, + "values": [ + "" + ], + "verify": false, + "version": "v1.0.0-beta.3", + "wait": true + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "local_file", + "name": "config_map_aws_auth", + "each": "list", + "provider": "provider.local", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "content": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", + "content_base64": null, + "filename": "credentials/config-map-aws-auth_my-cluster.yaml", + "id": "978ee48452ea948de06accdfb8813993dcee1a6b", + "sensitive_content": null + }, + "private": "bnVsbA==", + "depends_on": [ + "data.template_file.config_map_aws_auth" + ] + } + ] + }, + { + "module": "module.tidb-operator", + "mode": "managed", + "type": "local_file", + "name": "kubeconfig", + "provider": "provider.local", + "instances": [ + { + "schema_version": 0, + "attributes": { + "content": 
null, + "content_base64": null, + "filename": "credentials/kubeconfig_my-cluster", + "id": "94c5caf94ec9455972b0f8540c6aaa42617c43f8", + "sensitive_content": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n" + }, + "private": "bnVsbA==", + "depends_on": [ + "module.eks" + ] + } + ] + }, + { + "mode": "managed", + "type": "local_file", + "name": "kubeconfig", + "provider": "provider.local", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "local_file", + "name": "kubeconfig", + "each": "list", + "provider": "provider.local", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "content": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n", + "content_base64": null, + "filename": "credentials/kubeconfig_my-cluster", + "id": "94c5caf94ec9455972b0f8540c6aaa42617c43f8", + "sensitive_content": null + }, + "private": "bnVsbA==", + "depends_on": [ + "data.template_file.kubeconfig" + ] + } + ] + }, + { + "module": "module.key-pair", + "mode": "managed", + "type": "local_file", + "name": "private_key_pem", + "each": "list", + "provider": "provider.local", + "instances": [] + }, + { + "module": "module.key-pair", + "mode": "managed", + "type": "local_file", + "name": "public_key_openssh", + "each": "list", + "provider": "provider.local", + "instances": [] + }, + { + "module": "module.key-pair", + "mode": "managed", + "type": "null_resource", + "name": "chmod", + "each": "list", + "provider": "provider.null", + "instances": [] + }, + { + "module": "module.tidb-operator", + "mode": "managed", + "type": "null_resource", + "name": "setup-env", + "provider": "provider.null", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "4147419460234023980", + "triggers": null + }, + "depends_on": [ + "local_file.kubeconfig" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "null_resource", + "name": "tags_as_list_of_maps", + "each": "list", + "provider": "provider.null", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "null_resource", + "name": "tags_as_list_of_maps", + "each": "list", + "provider": "provider.null", + "instances": [] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "null_resource", + "name": "tags_as_list_of_maps", + "each": "list", + "provider": "provider.null", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "8699955636061529248", + "triggers": { + "key": "app", + 
"propagate_at_launch": "true", + "value": "tidb" + } + } + } + ] + }, + { + "module": "module.tidb-operator.module.eks", + "mode": "managed", + "type": "null_resource", + "name": "update_config_map_aws_auth", + "each": "list", + "provider": "provider.null", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "7794443773218902071", + "triggers": { + "config_map_rendered": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", + "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", + "kube_config_map_rendered": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n" + } + }, + "depends_on": [ + "aws_eks_cluster.this", + "data.template_file.config_map_aws_auth", + "data.template_file.kubeconfig" + ] + } + ] + }, + { + "module": "module.default-cluster", + "mode": "managed", + "type": "null_resource", + "name": "wait-tidb-ready", + "provider": "provider.null", + "instances": [] + }, + { + "module": "module.test-cluster", + "mode": "managed", + "type": "null_resource", + "name": "wait-tidb-ready", + "provider": "provider.null", + "instances": [] + }, + { + "module": "module.key-pair", + "mode": "managed", + "type": "tls_private_key", + "name": "generated", + "provider": "provider.tls", + "instances": [ + { + "schema_version": 0, + "attributes": { + "algorithm": "RSA", + "ecdsa_curve": "P224", + "id": "9d6496bbd72df74a70b397d78f726756ce87e9d1", + 
"private_key_pem": "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAssPgwsgg48MSHkeo6AiSKZWgq8q4lO61+Y5jqotixxgr62og\nrYX2cNxEAXPZLYCE12ejgi3Ed5juoLm+ZhM5EWUBVJBeA5IlMQf/HprqniYEu54K\nuV7wwEaqgjU9Tdnv3DqfiyHjOLwqX22UHGczs6fO00hhdcfDpMqg5qgltcJNHPfE\nG2Yh9gSWi8TvYZq31UwHlED4mX7Z9utC6ptMhDWLonbNDaVg+LrLpRN+rLETccsW\njTD0QNO71WtSIRvBszMCpOWAAkSYXbWUBFxThaNIzbha0exV/g4PFDd2L8VAEySD\nK9lHjM1iI6Ea1wXdSxB+iVsctVdqMK1j4uBOTwIDAQABAoIBAFjF3vq7aWHRwFCb\nI2y+vN67uEM3c3w6ItIf/Kx3dYELJtxMIpgpO12GdJsIDaSD+vQBOnhuA++kWXQl\naUDFcQSLVSLKYnWBgMidgPqQ0cvhc148OHUfiYziStBIYf4kKPIDhrEQDgdhoeUr\nxG5qbYlc3t+bRRK5NhXCri589+UPGKjYD+Qzjm7VKlih+OOgRDLkD4nu+BDBlsVJ\ntaWsfZMjsKUp35sV1fvDMiap+FsI6i/CBZpf4eCnMXDQt5XnBrXrUeDxf/aZw1g9\n+niwzyjJCNTv6omA1YpJYA4p6WftcSSJWk7GvnelTwyQlXgjm6ZbM7BkM+iJkllm\nZlEOT4ECgYEA3Q+Y16DGazwy5C5XPCOTPNF3J91ib/ko0FfmKLzlEj/+jtIL2+o6\ndzd6ezvGU9c23Qp4FiILHVumxL8O+OYdGmOkhx3SqxbtqDODUHJYXumFUv0Jb3BH\nOjQgzCq/fUOcAYHbjdPIRhnUZ4yRoYDnerGPGVO2RyUMpf7AIDtlPI8CgYEAzwTw\nyyQD7g1uKptQ4f5IG/XqG9mmHFQ2Gu9uO3bvhZYtQEhArfh7OgnskK2L0fKcbZIX\nF1JBgJW8bNV6/9P//mts+Y4cValuqZtCmJjPOw2nX/u0X+/eJRHavfbFNlmACaMd\nBx7HGDHR6yZp9Of0TbUt56m7TBlSifaISk4oMkECgYAETrJ+uR5EpqajNZfzjwnm\nbHpy52hsoCFAdgYBEzUvdtnB9KvQfC7pdcZIMnD53z6tbe/LFpy61LdaLBLhnLJC\nemCRVW5ucQLufRp47dF0//3eERom9rwckTl2YPrcOP4INXyOteq4Gva9kcqgp/9a\nr60HJE9v8XPepCkgN6gQVwKBgFYAfXBG5AMPPUciAvX/x0EmZj1vq9x09401DpxR\niqv6eY4M9iHP6pFv8gEgt8defLHgUQt1NpUOn5qvDUwebGjrg/ggm5DStJBtWbs/\nMEgeIfxz+rkoUycfRbpJPCCaCeD3DGYa2Scp+0UvTjFZ81oc/JcTIiY5FtsNugz0\nbyqBAoGAe5+S7sO613qrSp1iL3Es9HoMnPniwZAmGyhhSG5bFCThKlKolR2XwzV0\n/bz78tdz9KAzEfalAwBt2b394nNttKYqQSwJYpDVWAgzycRDgvjOR5J4pAapS4sF\nH2R2SvIcXGxAOMp3eR8Ysq/Q06kKnGxOapt2gyDU3yJxjaE5PrQ=\n-----END RSA PRIVATE KEY-----\n", + "public_key_fingerprint_md5": "52:e4:05:eb:0f:b4:aa:34:46:27:1a:21:63:40:f5:39", + "public_key_openssh": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyw+DCyCDjwxIeR6joCJIplaCryriU7rX5jmOqi2LHGCvraiCthfZw3EQBc9ktgITXZ6OCLcR3mO6gub5mEzkRZQFUkF4DkiUxB/8emuqeJgS7ngq5XvDARqqCNT1N2e/cOp+LIeM4vCpfbZQcZzOzp87TSGF1x8OkyqDmqCW1wk0c98QbZiH2BJaLxO9hmrfVTAeUQPiZftn260Lqm0yENYuids0NpWD4usulE36ssRNxyxaNMPRA07vVa1IhG8GzMwKk5YACRJhdtZQEXFOFo0jNuFrR7FX+Dg8UN3YvxUATJIMr2UeMzWIjoRrXBd1LEH6JWxy1V2owrWPi4E5P\n", + "public_key_pem": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAssPgwsgg48MSHkeo6AiS\nKZWgq8q4lO61+Y5jqotixxgr62ogrYX2cNxEAXPZLYCE12ejgi3Ed5juoLm+ZhM5\nEWUBVJBeA5IlMQf/HprqniYEu54KuV7wwEaqgjU9Tdnv3DqfiyHjOLwqX22UHGcz\ns6fO00hhdcfDpMqg5qgltcJNHPfEG2Yh9gSWi8TvYZq31UwHlED4mX7Z9utC6ptM\nhDWLonbNDaVg+LrLpRN+rLETccsWjTD0QNO71WtSIRvBszMCpOWAAkSYXbWUBFxT\nhaNIzbha0exV/g4PFDd2L8VAEySDK9lHjM1iI6Ea1wXdSxB+iVsctVdqMK1j4uBO\nTwIDAQAB\n-----END PUBLIC KEY-----\n", + "rsa_bits": 2048 + } + } + ] + } + ] +} diff --git a/deploy/aws/tidb-cluster/cluster.tf b/deploy/aws/tidb-cluster/cluster.tf index 643724bebe..9a3dc33076 100644 --- a/deploy/aws/tidb-cluster/cluster.tf +++ b/deploy/aws/tidb-cluster/cluster.tf @@ -1,21 +1,3 @@ -# kubernetes and helm providers rely on EKS, but terraform provider doesn't support depends_on -# follow this link https://github.com/hashicorp/terraform/issues/2430#issuecomment-370685911 -# we have the following hack -resource "local_file" "kubeconfig" { - depends_on = [var.eks] - sensitive_content = var.eks.kubeconfig - filename = var.eks.kubeconfig_filename -} - -provider "helm" { - insecure = true - # service_account = "tiller" - # install_tiller = true # currently this doesn't work, so we install tiller in the local-exec provisioner. 
See https://github.com/terraform-providers/terraform-provider-helm/issues/148
-  kubernetes {
-    config_path = local_file.kubeconfig.filename
-  }
-}
-
 resource "null_resource" "wait-tiller-ready" {
   depends_on = [var.eks]
@@ -24,10 +6,11 @@ resource "null_resource" "wait-tiller-ready" {
     command = <
Date: Wed, 3 Jul 2019 01:21:28 +0800
Subject: [PATCH 09/11] Remove files added by mistake

Signed-off-by: Aylei
---
 .../aws/terraform.tfstate.1562081991.backup | 3113 -----------------
 1 file changed, 3113 deletions(-)
 delete mode 100644 deploy/aws/terraform.tfstate.1562081991.backup

diff --git a/deploy/aws/terraform.tfstate.1562081991.backup b/deploy/aws/terraform.tfstate.1562081991.backup
deleted file mode 100644
index fc162942ba..0000000000
--- a/deploy/aws/terraform.tfstate.1562081991.backup
+++ /dev/null
@@ -1,3113 +0,0 @@
-{
-  "version": 4,
-  "terraform_version": "0.12.3",
-  "serial": 1906,
-  "lineage": "0c22b019-b12a-5c6c-6ee4-a1daa6cb8515",
-  "outputs": {
-    "eks_endpoint": {
-      "value": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com",
-      "type": "string"
-    },
-    "kubeconfig_filename": {
-      "value": "credentials/kubeconfig_my-cluster",
-      "type": "string"
-    }
-  },
-  "resources": [
-    {
-      "module": "module.tidb-operator.module.eks",
-      "mode": "data",
-      "type": "aws_ami",
-      "name": "eks_worker",
-      "provider": "provider.aws",
-      "instances": [
-        {
-          "schema_version": 0,
-          "attributes": {
-            "architecture": "x86_64",
-            "block_device_mappings": [
-              {
-                "device_name": "/dev/xvda",
-                "ebs": {
-                  "delete_on_termination": "true",
-                  "encrypted": "false",
-                  "iops": "0",
-                  "snapshot_id": "snap-0b4944fcf084e5907",
-                  "volume_size": "20",
-                  "volume_type": "gp2"
-                },
-                "no_device": "",
-                "virtual_name": ""
-              }
-            ],
-            "creation_date": "2019-06-15T06:42:59.000Z",
-            "description": "EKS Kubernetes Worker AMI with AmazonLinux2 image (k8s: 1.12.7, docker:18.06)",
-            "executable_users": null,
-            "filter": [
-              {
-                "name": "name",
-                "values": [
-                  "amazon-eks-node-1.12-v*"
-                ]
-              }
-            ],
-            "hypervisor": "xen",
-            "id": "ami-0f11fd98b02f12a4c",
-            "image_id": "ami-0f11fd98b02f12a4c",
-            "image_location": "amazon/amazon-eks-node-1.12-v20190614",
-            "image_owner_alias": "amazon",
-            "image_type": "machine",
-            "kernel_id": null,
-            "most_recent": true,
-            "name": "amazon-eks-node-1.12-v20190614",
-            "name_regex": null,
-            "owner_id": "602401143452",
-            "owners": [
-              "602401143452"
-            ],
-            "platform": null,
-            "product_codes": [],
-            "public": true,
-            "ramdisk_id": null,
-            "root_device_name": "/dev/xvda",
-            "root_device_type": "ebs",
-            "root_snapshot_id": "snap-0b4944fcf084e5907",
-            "sriov_net_support": "simple",
-            "state": "available",
-            "state_reason": {
-              "code": "UNSET",
-              "message": "UNSET"
-            },
-            "tags": {},
-            "virtualization_type": "hvm"
-          }
-        }
-      ]
-    },
-    {
-      "mode": "data",
-      "type": "aws_availability_zones",
-      "name": "available",
-      "provider": "provider.aws",
-      "instances": [
-        {
-          "schema_version": 0,
-          "attributes": {
-            "blacklisted_names": null,
-            "blacklisted_zone_ids": null,
-            "id": "2019-07-02 15:16:35.907254 +0000 UTC",
-            "names": [
-              "us-west-2a",
-              "us-west-2b",
-              "us-west-2c",
-              "us-west-2d"
-            ],
-            "state": null,
-            "zone_ids": [
-              "usw2-az2",
-              "usw2-az1",
-              "usw2-az3",
-              "usw2-az4"
-            ]
-          }
-        }
-      ]
-    },
-    {
-      "module": "module.tidb-operator.module.eks",
-      "mode": "data",
-      "type": "aws_caller_identity",
-      "name": "current",
-      "provider": "provider.aws",
-      "instances": [
-        {
-          "schema_version": 0,
-          "attributes": {
-            "account_id": "385595570414",
-            "arn": "arn:aws:iam::385595570414:user/dengshsuan",
-
"id": "2019-07-02 15:16:36.89717 +0000 UTC", - "user_id": "AIDAVTR2JPDXCNJDMZJ6H" - } - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_instance_profile", - "name": "custom_worker_group_iam_instance_profile", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_instance_profile", - "name": "custom_worker_group_launch_template_iam_instance_profile", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_instance_profile", - "name": "custom_worker_group_launch_template_mixed_iam_instance_profile", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_policy_document", - "name": "cluster_assume_role_policy", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "2764486067", - "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"EKSClusterAssumeRole\",\n \"Effect\": \"Allow\",\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"eks.amazonaws.com\"\n }\n }\n ]\n}", - "override_json": null, - "policy_id": null, - "source_json": null, - "statement": [ - { - "actions": [ - "sts:AssumeRole" - ], - "condition": [], - "effect": "Allow", - "not_actions": [], - "not_principals": [], - "not_resources": [], - "principals": [ - { - "identifiers": [ - "eks.amazonaws.com" - ], - "type": "Service" - } - ], - "resources": [], - "sid": "EKSClusterAssumeRole" - } - ], - "version": "2012-10-17" - } - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_policy_document", - "name": "worker_autoscaling", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "2336810661", - "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"eksWorkerAutoscalingAll\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2:DescribeLaunchTemplateVersions\",\n \"autoscaling:DescribeTags\",\n \"autoscaling:DescribeLaunchConfigurations\",\n \"autoscaling:DescribeAutoScalingInstances\",\n \"autoscaling:DescribeAutoScalingGroups\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"eksWorkerAutoscalingOwn\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"autoscaling:UpdateAutoScalingGroup\",\n \"autoscaling:TerminateInstanceInAutoScalingGroup\",\n \"autoscaling:SetDesiredCapacity\"\n ],\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringEquals\": {\n \"autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled\": \"true\",\n \"autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster\": \"owned\"\n }\n }\n }\n ]\n}", - "override_json": null, - "policy_id": null, - "source_json": null, - "statement": [ - { - "actions": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeLaunchTemplateVersions" - ], - "condition": [], - "effect": "Allow", - "not_actions": [], - "not_principals": [], - "not_resources": [], - "principals": [], - "resources": [ - "*" - ], - "sid": "eksWorkerAutoscalingAll" - }, - { - "actions": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup" - ], - "condition": [ - { - 
"test": "StringEquals", - "values": [ - "owned" - ], - "variable": "autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster" - }, - { - "test": "StringEquals", - "values": [ - "true" - ], - "variable": "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled" - } - ], - "effect": "Allow", - "not_actions": [], - "not_principals": [], - "not_resources": [], - "principals": [], - "resources": [ - "*" - ], - "sid": "eksWorkerAutoscalingOwn" - } - ], - "version": "2012-10-17" - }, - "depends_on": [ - "aws_eks_cluster.this" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_policy_document", - "name": "workers_assume_role_policy", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "3778018924", - "json": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"EKSWorkerAssumeRole\",\n \"Effect\": \"Allow\",\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n }\n }\n ]\n}", - "override_json": null, - "policy_id": null, - "source_json": null, - "statement": [ - { - "actions": [ - "sts:AssumeRole" - ], - "condition": [], - "effect": "Allow", - "not_actions": [], - "not_principals": [], - "not_resources": [], - "principals": [ - { - "identifiers": [ - "ec2.amazonaws.com" - ], - "type": "Service" - } - ], - "resources": [], - "sid": "EKSWorkerAssumeRole" - } - ], - "version": "2012-10-17" - } - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_iam_role", - "name": "custom_cluster_iam_role", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "aws_region", - "name": "current", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 0, - "attributes": { - "current": null, - "description": "US West (Oregon)", - "endpoint": "ec2.us-west-2.amazonaws.com", - "id": "us-west-2", - "name": "us-west-2" - } - } - ] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "apigw", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "cloudtrail", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "dynamodb", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ec2", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ec2messages", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ecr_api", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ecr_dkr", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ecs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - 
"name": "ecs_agent", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ecs_telemetry", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "elasticloadbalancing", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "events", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "kms", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "logs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "monitoring", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "s3", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "sns", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "sqs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ssm", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "data", - "type": "aws_vpc_endpoint_service", - "name": "ssmmessages", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "data", - "type": "external", - "name": "monitor_elb", - "provider": "provider.external", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "data", - "type": "external", - "name": "monitor_elb", - "provider": "provider.external", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "data", - "type": "external", - "name": "tidb_elb", - "provider": "provider.external", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "data", - "type": "external", - "name": "tidb_elb", - "provider": "provider.external", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "data", - "type": "helm_repository", - "name": "pingcap", - "provider": "provider.helm.eks", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "data", - "type": "helm_repository", - "name": "pingcap", - "provider": "provider.helm.eks", - "instances": [] - }, - { - "module": "module.tidb-operator", - "mode": "data", - "type": "helm_repository", - "name": "pingcap", - "provider": "module.tidb-operator.provider.helm", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "aws_authenticator_env_variables", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": 
"config_map_aws_auth", - "provider": "provider.template", - "instances": [ - { - "schema_version": 0, - "attributes": { - "filename": null, - "id": "e30bd200646bbb687a3629838542cd2efd20aa6f4fe3dca6cc606fa4895ba608", - "rendered": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", - "template": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n${worker_role_arn}\n${map_roles}\n mapUsers: |\n${map_users}\n mapAccounts: |\n${map_accounts}\n", - "vars": { - "map_accounts": "", - "map_roles": "", - "map_users": "", - "worker_role_arn": " - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n" - } - }, - "depends_on": [ - "data.template_file.launch_template_mixed_worker_role_arns", - "data.template_file.launch_template_worker_role_arns", - "data.template_file.map_accounts", - "data.template_file.map_roles", - "data.template_file.map_users", - "data.template_file.worker_role_arns" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "kubeconfig", - "provider": "provider.template", - "instances": [ - { - "schema_version": 0, - "attributes": { - "filename": null, - "id": "271318f91812c79d01834167167c38fb85a7b7fe82bc51b092b967bb138dc5a4", - "rendered": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - 
\"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n", - "template": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: ${endpoint}\n certificate-authority-data: ${cluster_auth_base64}\n name: ${kubeconfig_name}\n\ncontexts:\n- context:\n cluster: ${kubeconfig_name}\n user: ${kubeconfig_name}\n name: ${kubeconfig_name}\n\ncurrent-context: ${kubeconfig_name}\n\nusers:\n- name: ${kubeconfig_name}\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: ${aws_authenticator_command}\n args:\n${aws_authenticator_command_args}\n${aws_authenticator_additional_args}\n${aws_authenticator_env_variables}\n", - "vars": { - "aws_authenticator_additional_args": "", - "aws_authenticator_command": "aws-iam-authenticator", - "aws_authenticator_command_args": " - \"token\"\n - \"-i\"\n - \"my-cluster\"", - "aws_authenticator_env_variables": "", - "cluster_auth_base64": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", - "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", - "kubeconfig_name": "eks_my-cluster", - "region": "us-west-2" - } - }, - "depends_on": [ - "aws_eks_cluster.this", - "data.aws_region.current", - "data.template_file.aws_authenticator_env_variables" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "launch_template_mixed_worker_role_arns", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "data", - "type": "template_file", - "name": "launch_template_userdata", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "data", - "type": "template_file", - "name": "launch_template_userdata", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "launch_template_userdata", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": 
"launch_template_worker_role_arns", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "map_accounts", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "map_roles", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "map_users", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "data", - "type": "template_file", - "name": "userdata", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "data", - "type": "template_file", - "name": "userdata", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "userdata", - "each": "list", - "provider": "provider.template", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "filename": null, - "id": "4a99c387d5c0bc4b5f2dbea53b336e9dd3bd912664fab7c8261b0394938c1ce9", - "rendered": "#!/bin/bash -xe\n\n# Allow user supplied pre userdata code\n\n\n# Bootstrap and join the cluster\n/etc/eks/bootstrap.sh --b64-cluster-ca 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=' --apiserver-endpoint 'https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com' --kubelet-extra-args '' 'my-cluster'\n\n# Allow user supplied userdata code\n\n", - "template": "#!/bin/bash -xe\n\n# Allow user supplied pre userdata code\n${pre_userdata}\n\n# Bootstrap and join the cluster\n/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}'\n\n# Allow user supplied userdata code\n${additional_userdata}\n", - "vars": { - "additional_userdata": "", - 
"bootstrap_extra_args": "", - "cluster_auth_base64": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", - "cluster_name": "my-cluster", - "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", - "kubelet_extra_args": "", - "pre_userdata": "" - } - }, - "depends_on": [ - "aws_eks_cluster.this" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "worker_role_arns", - "each": "list", - "provider": "provider.template", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "filename": null, - "id": "43e544b6f14ee17b5fe2c31f073e3ea86423a3b01f4f5733a32fc3e21b0326d2", - "rendered": " - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n", - "template": " - rolearn: ${worker_role_arn}\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n", - "vars": { - "worker_role_arn": "arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005" - } - }, - "depends_on": [ - "aws_iam_instance_profile.workers", - "data.aws_caller_identity.current", - "data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "data", - "type": "template_file", - "name": "workers_launch_template_mixed", - "each": "list", - "provider": "provider.template", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "arn": 
"arn:aws:autoscaling:us-west-2:385595570414:autoScalingGroup:af99cd65-847c-402b-8d7a-834df71b543b:autoScalingGroupName/my-cluster-my-cluster-control2019070214202285200000000e", - "availability_zones": [ - "us-west-2a", - "us-west-2b", - "us-west-2c" - ], - "default_cooldown": 300, - "desired_capacity": 1, - "enabled_metrics": [], - "force_delete": false, - "health_check_grace_period": 300, - "health_check_type": "EC2", - "id": "my-cluster-my-cluster-control2019070214202285200000000e", - "initial_lifecycle_hook": [], - "launch_configuration": "my-cluster-my-cluster-control2019070214201744850000000d", - "launch_template": [], - "load_balancers": [], - "max_size": 3, - "metrics_granularity": "1Minute", - "min_elb_capacity": null, - "min_size": 1, - "mixed_instances_policy": [], - "name": "my-cluster-my-cluster-control2019070214202285200000000e", - "name_prefix": "my-cluster-my-cluster-control", - "placement_group": "", - "protect_from_scale_in": false, - "service_linked_role_arn": "arn:aws:iam::385595570414:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", - "suspended_processes": [ - "AZRebalance" - ], - "tag": [], - "tags": [ - { - "key": "Name", - "propagate_at_launch": "true", - "value": "my-cluster-my-cluster-control-eks_asg" - }, - { - "key": "kubernetes.io/cluster/my-cluster", - "propagate_at_launch": "true", - "value": "owned" - }, - { - "key": "k8s.io/cluster-autoscaler/disabled", - "propagate_at_launch": "false", - "value": "true" - }, - { - "key": "k8s.io/cluster-autoscaler/my-cluster", - "propagate_at_launch": "false", - "value": "my-cluster" - }, - { - "key": "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage", - "propagate_at_launch": "false", - "value": "100Gi" - }, - { - "key": "app", - "propagate_at_launch": "true", - "value": "tidb" - } - ], - "target_group_arns": [], - "termination_policies": [], - "timeouts": null, - "vpc_zone_identifier": [ - "subnet-01c62f5324fe05605", - "subnet-0869b629c725c799c", - "subnet-08b8c355f30ed58ba" - ], - "wait_for_capacity_timeout": "10m", - "wait_for_elb_capacity": null - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiZGVsZXRlIjo2MDAwMDAwMDAwMDB9fQ==", - "depends_on": [ - "aws_eks_cluster.this", - "aws_launch_configuration.workers" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_autoscaling_group", - "name": "workers_launch_template_mixed", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_cloudwatch_log_group", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_db_subnet_group", - "name": "database", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": 
"module.vpc", - "mode": "managed", - "type": "aws_default_network_acl", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_default_vpc", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_eip", - "name": "nat", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_eks_cluster", - "name": "this", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 0, - "attributes": { - "arn": "arn:aws:eks:us-west-2:385595570414:cluster/my-cluster", - "certificate_authority": [ - { - "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=" - } - ], - "created_at": "2019-07-02 14:09:27 +0000 UTC", - "enabled_cluster_log_types": [], - "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", - "id": "my-cluster", - "name": "my-cluster", - "platform_version": "eks.2", - "role_arn": "arn:aws:iam::385595570414:role/my-cluster20190702140850744000000001", - "timeouts": { - "create": "15m", - "delete": "15m", - "update": null - }, - "version": "1.12", - "vpc_config": [ - { - "endpoint_private_access": false, - "endpoint_public_access": true, - "security_group_ids": [ - "sg-049b5a71010e17d3f" - ], - "subnet_ids": [ - "subnet-01c62f5324fe05605", - "subnet-0869b629c725c799c", - "subnet-08b8c355f30ed58ba" - ], - "vpc_id": "vpc-045ead8290dd948d4" - } - ] - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo5MDAwMDAwMDAwMDAsImRlbGV0ZSI6OTAwMDAwMDAwMDAwLCJ1cGRhdGUiOjM2MDAwMDAwMDAwMDB9fQ==", - "depends_on": [ - "aws_cloudwatch_log_group.this", - "aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy", - "aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy" - ] - } - ] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_elasticache_subnet_group", - "name": "elasticache", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": 
"managed", - "type": "aws_iam_instance_profile", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "arn": "arn:aws:iam::385595570414:instance-profile/my-cluster20190702141952556900000008", - "create_date": "2019-07-02T14:19:53Z", - "id": "my-cluster20190702141952556900000008", - "name": "my-cluster20190702141952556900000008", - "name_prefix": "my-cluster", - "path": "/", - "role": "my-cluster20190702141950357100000005", - "roles": [ - "my-cluster20190702141950357100000005" - ], - "unique_id": "AIPAVTR2JPDXMT2SSY5OY" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_eks_cluster.this" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "aws_iam_instance_profile", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "aws_iam_instance_profile", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_instance_profile", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_instance_profile", - "name": "workers_launch_template_mixed", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_policy", - "name": "worker_autoscaling", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "arn": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", - "description": "EKS worker node autoscaling policy for cluster my-cluster", - "id": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", - "name": "eks-worker-autoscaling-my-cluster20190702141950399500000007", - "name_prefix": "eks-worker-autoscaling-my-cluster", - "path": "/", - "policy": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"eksWorkerAutoscalingAll\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ec2:DescribeLaunchTemplateVersions\",\n \"autoscaling:DescribeTags\",\n \"autoscaling:DescribeLaunchConfigurations\",\n \"autoscaling:DescribeAutoScalingInstances\",\n \"autoscaling:DescribeAutoScalingGroups\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Sid\": \"eksWorkerAutoscalingOwn\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"autoscaling:UpdateAutoScalingGroup\",\n \"autoscaling:TerminateInstanceInAutoScalingGroup\",\n \"autoscaling:SetDesiredCapacity\"\n ],\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringEquals\": {\n \"autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled\": \"true\",\n \"autoscaling:ResourceTag/kubernetes.io/cluster/my-cluster\": \"owned\"\n }\n }\n }\n ]\n}" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_eks_cluster.this", - "data.aws_iam_policy_document.worker_autoscaling" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role", - "name": "cluster", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "arn": 
"arn:aws:iam::385595570414:role/my-cluster20190702140850744000000001", - "assume_role_policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"EKSClusterAssumeRole\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"eks.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}", - "create_date": "2019-07-02T14:08:52Z", - "description": "", - "force_detach_policies": true, - "id": "my-cluster20190702140850744000000001", - "max_session_duration": 3600, - "name": "my-cluster20190702140850744000000001", - "name_prefix": "my-cluster", - "path": "/", - "permissions_boundary": null, - "tags": { - "app": "tidb" - }, - "unique_id": "AROAVTR2JPDXFAOM7QD2V" - }, - "private": "bnVsbA==", - "depends_on": [ - "data.aws_iam_policy_document.cluster_assume_role_policy" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "arn": "arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005", - "assume_role_policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"EKSWorkerAssumeRole\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}", - "create_date": "2019-07-02T14:19:51Z", - "description": "", - "force_detach_policies": true, - "id": "my-cluster20190702141950357100000005", - "max_session_duration": 3600, - "name": "my-cluster20190702141950357100000005", - "name_prefix": "my-cluster", - "path": "/", - "permissions_boundary": null, - "tags": {}, - "unique_id": "AROAVTR2JPDXOBSTW46SB" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_eks_cluster.this", - "data.aws_iam_policy_document.workers_assume_role_policy" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "cluster_AmazonEKSClusterPolicy", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702140850744000000001-20190702140855882500000002", - "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", - "role": "my-cluster20190702140850744000000001" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_role.cluster[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "cluster_AmazonEKSServicePolicy", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702140850744000000001-20190702140856941000000003", - "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", - "role": "my-cluster20190702140850744000000001" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_role.cluster[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "workers_AmazonEC2ContainerRegistryReadOnly", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702141950357100000005-2019070214195450610000000b", - "policy_arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", - "role": "my-cluster20190702141950357100000005" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_role.workers[0]" - ] - } - ] - 
}, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "workers_AmazonEKSWorkerNodePolicy", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702141950357100000005-2019070214195361280000000a", - "policy_arn": "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "role": "my-cluster20190702141950357100000005" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_role.workers[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "workers_AmazonEKS_CNI_Policy", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702141950357100000005-20190702141953520500000009", - "policy_arn": "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - "role": "my-cluster20190702141950357100000005" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_role.workers[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "workers_additional_policies", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_iam_role_policy_attachment", - "name": "workers_autoscaling", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "my-cluster20190702141950357100000005-2019070214200167710000000c", - "policy_arn": "arn:aws:iam::385595570414:policy/eks-worker-autoscaling-my-cluster20190702141950399500000007", - "role": "my-cluster20190702141950357100000005" - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_iam_policy.worker_autoscaling[0]", - "aws_iam_role.workers[0]" - ] - } - ] - }, - { - "module": "module.ec2", - "mode": "managed", - "type": "aws_instance", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.ec2", - "mode": "managed", - "type": "aws_instance", - "name": "this_t2", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_internet_gateway", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.key-pair", - "mode": "managed", - "type": "aws_key_pair", - "name": "generated", - "provider": "provider.aws", - "instances": [ - { - "schema_version": 1, - "attributes": { - "fingerprint": "34:26:51:9e:09:d0:63:28:eb:5e:87:b6:f3:ea:24:bd", - "id": "my-cluster", - "key_name": "my-cluster", - "key_name_prefix": null, - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyw+DCyCDjwxIeR6joCJIplaCryriU7rX5jmOqi2LHGCvraiCthfZw3EQBc9ktgITXZ6OCLcR3mO6gub5mEzkRZQFUkF4DkiUxB/8emuqeJgS7ngq5XvDARqqCNT1N2e/cOp+LIeM4vCpfbZQcZzOzp87TSGF1x8OkyqDmqCW1wk0c98QbZiH2BJaLxO9hmrfVTAeUQPiZftn260Lqm0yENYuids0NpWD4usulE36ssRNxyxaNMPRA07vVa1IhG8GzMwKk5YACRJhdtZQEXFOFo0jNuFrR7FX+Dg8UN3YvxUATJIMr2UeMzWIjoRrXBd1LEH6JWxy1V2owrWPi4E5P" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==", - "depends_on": [ - "tls_private_key.generated" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "aws_launch_configuration", - "name": "workers", - "each": "list", - "provider": 
"provider.aws", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "aws_launch_configuration", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_launch_configuration", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "associate_public_ip_address": false, - "ebs_block_device": [], - "ebs_optimized": false, - "enable_monitoring": true, - "ephemeral_block_device": [], - "iam_instance_profile": "my-cluster20190702141952556900000008", - "id": "my-cluster-my-cluster-control2019070214201744850000000d", - "image_id": "ami-0f11fd98b02f12a4c", - "instance_type": "t2.xlarge", - "key_name": "my-cluster", - "name": "my-cluster-my-cluster-control2019070214201744850000000d", - "name_prefix": "my-cluster-my-cluster-control", - "placement_tenancy": null, - "root_block_device": [ - { - "delete_on_termination": true, - "iops": 0, - "volume_size": 100, - "volume_type": "gp2" - } - ], - "security_groups": [ - "sg-0901b76a8e3e7055e" - ], - "spot_price": "", - "user_data": null, - "user_data_base64": "IyEvYmluL2Jhc2ggLXhlCgojIEFsbG93IHVzZXIgc3VwcGxpZWQgcHJlIHVzZXJkYXRhIGNvZGUKCgojIEJvb3RzdHJhcCBhbmQgam9pbiB0aGUgY2x1c3RlcgovZXRjL2Vrcy9ib290c3RyYXAuc2ggLS1iNjQtY2x1c3Rlci1jYSAnTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTjVSRU5EUVdKRFowRjNTVUpCWjBsQ1FVUkJUa0puYTNGb2EybEhPWGN3UWtGUmMwWkJSRUZXVFZKTmQwVlJXVVJXVVZGRVJYZHdjbVJYU213S1kyMDFiR1JIVm5wTlFqUllSRlJGTlUxRVkzZE5ha1V3VFZSWk1VOVdiMWhFVkVrMVRVUlplVTlVUlRCTlZGa3hUMVp2ZDBaVVJWUk5Ra1ZIUVRGVlJRcEJlRTFMWVROV2FWcFlTblZhV0ZKc1kzcERRMEZUU1hkRVVWbEtTMjlhU1doMlkwNUJVVVZDUWxGQlJHZG5SVkJCUkVORFFWRnZRMmRuUlVKQlMwSjRDbVZLZGxFelNsZEZia1pxY2tGQlJWRlZUREZVUzBGVFJVUnhhVXQ1UVhsd2QySmxOMFV3VkU1alFuQnFSMWt2VXpGVFpuUlpNbFpFT0d0UWJuRm5jak1LT0hGdVlYbDNkMWN2T0VSWWRqRXZOMHQ1T0hkc1UyNUdUMnh1VGpaWldrNXZVbTlqUkZFMlYxSk5aMlV6YzNwNU1GaDNMeTl5V1c1Q1FtOHhlaXRGWlFwSE5uTlZOemxYUWpsNEsxVktkV0pOYVdKNU5XOVlZWE5xVVVOdFpGVkhPV2t6VmtsTVFUSkhiMXBHVG5OMGQxVk9VVGxUU1VoYVJscElXa2x0VkZSckNtZHFjVTExVDNkeFNHWkphM1ZETmxORFJFVXdUbmh0U1VWcldVOHdiMUo2V1ZkSVNDdDZPRXN4UWpGaE1HRjJjMXA0TnpKR1pqSjFVRTVvVGxFMVJVMEtReTk1ZGpGbUwzZGFUREUyUkhKQk1VUlVURkpTV0ZsbmJYUXlMMmx2UVZSV1JGcFFkV2hKUXpreldITlpNSEkxYmxZcmQyeFdRbWwwTlZadllUSjVOQXA2ZFhwaGRXaEVNMWd6TVRCclpWQmhVWFZqUTBGM1JVRkJZVTFxVFVORmQwUm5XVVJXVWpCUVFWRklMMEpCVVVSQlowdHJUVUU0UjBFeFZXUkZkMFZDQ2k5M1VVWk5RVTFDUVdZNGQwUlJXVXBMYjFwSmFIWmpUa0ZSUlV4Q1VVRkVaMmRGUWtGSlREQk9iRzFDUnlzcmRqbDNiamQwZUhwaWFHeG9lV1ZPTTJNS2JVRTNOVE5VU1RCUk9XbEdlbkozU0VSa1YzSjNUbk1yYlc5WFN6bFJiVUpoT0cxWVRtaHlaVXBJWVhwUFVXdDBjVU5IUWtNNE9GWmtlWGhIWkU5UFp3bzJTV1ZRU25BMk5XODNZelJZU1hkVFkxbGthaTl3V1hwTGVIWlhhUzgyWmk5YWEwMDBUekF4UVVwVVprVmFXV1ZFYm1wbWFYWmthR3R6ZGtkMlZWRnVDakk1VERabVFrZzJkWGg1YTNrMGNWSnJhVzlrVGtwcE1IVklhemRhTlVWWU5TdE1jaXN6TnpoQmVIQktPRUpWTUZwd2JrMXNOVmhOVUZSMWJ6RTVWeXNLUjNCNVVFbFZRWEJVYW10UWRDOU5lbVE0Y3pkeVdESm1RV1JhWVc5dlVVTnZORXc0ZGtwR1dIZHVkREJIVVVwekwzYzVTMnRpWVhjeFkwMHhjWFJaVFFwRmEzaERRbm81TTBsMU9WaHlVRlZvTUc5dGRYVnlSMmQ1YWtSTGRVSm9Ua1pDYnk4d09HTXZabXg2VVZsU1ZHUkNSbXBYTlRZNWMzVlZRVDBLTFMwdExTMUZUa1FnUTBWU1ZFbEdTVU5CVkVVdExTMHRMUW89JyAtLWFwaXNlcnZlci1lbmRwb2ludCAnaHR0cHM6Ly8yQjA5RUQyMzI5REY0MDEyQ0MwQTY3MDVCRkIxNDk2NS55bDQudXMtd2VzdC0yLmVrcy5hbWF6b25hd3MuY29tJyAgLS1rdWJlbGV0LWV4dHJhLWFyZ3MgJycgJ215LWNsdXN0ZXInCgojIEFsbG93IHVzZXIgc3VwcGxpZWQgdXNlcmRhdGEgY29kZQoK", - "vpc_classic_link_id": "", - 
"vpc_classic_link_security_groups": [] - }, - "private": "bnVsbA==", - "depends_on": [ - "aws_eks_cluster.this", - "aws_iam_instance_profile.workers", - "data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile", - "data.template_file.userdata" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "aws_launch_template", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "aws_launch_template", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_launch_template", - "name": "workers_launch_template", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_launch_template", - "name": "workers_launch_template_mixed", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_nat_gateway", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "database", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "elasticache", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "intra", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "private", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl", - "name": "redshift", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "database_inbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "database_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "elasticache_inbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "elasticache_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "intra_inbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "intra_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "private_inbound", - 
"each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "private_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "public_inbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "public_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "redshift_inbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_network_acl_rule", - "name": "redshift_outbound", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_redshift_subnet_group", - "name": "redshift", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route", - "name": "database_internet_gateway", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route", - "name": "database_nat_gateway", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route", - "name": "private_nat_gateway", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route", - "name": "public_internet_gateway", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "database", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "elasticache", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "intra", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "private", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table", - "name": "redshift", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "database", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "elasticache", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "intra", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": 
"aws_route_table_association", - "name": "private", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "redshift", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_route_table_association", - "name": "redshift_public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group", - "name": "cluster", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:security-group/sg-049b5a71010e17d3f", - "description": "EKS cluster security group.", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "Allow cluster egress access to the Internet.", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_groups": [], - "self": false, - "to_port": 0 - } - ], - "id": "sg-049b5a71010e17d3f", - "ingress": [ - { - "cidr_blocks": [], - "description": "Allow pods to communicate with the EKS cluster API.", - "from_port": 443, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [ - "sg-0901b76a8e3e7055e" - ], - "self": false, - "to_port": 443 - } - ], - "name": "my-cluster20190702140914437300000004", - "name_prefix": "my-cluster", - "owner_id": "385595570414", - "revoke_rules_on_delete": false, - "tags": { - "Name": "my-cluster-eks_cluster_sg", - "app": "tidb" - }, - "timeouts": null, - "vpc_id": "vpc-045ead8290dd948d4" - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=" - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group", - "name": "workers", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:security-group/sg-0901b76a8e3e7055e", - "description": "Security group for all nodes in the cluster.", - "egress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "Allow nodes all egress to the Internet.", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_groups": [], - "self": false, - "to_port": 0 - } - ], - "id": "sg-0901b76a8e3e7055e", - "ingress": [ - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "kubernetes.io/rule/nlb/client=a0d6855d89cd511e9a9c20a756cd94eb", - "from_port": 31362, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31362 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "kubernetes.io/rule/nlb/client=a0d6855d89cd511e9a9c20a756cd94eb", - "from_port": 31945, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31945 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "kubernetes.io/rule/nlb/client=aff4a46c59cd411e9a9c20a756cd94eb", - 
"from_port": 31303, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31303 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "kubernetes.io/rule/nlb/client=aff4a46c59cd411e9a9c20a756cd94eb", - "from_port": 31366, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31366 - }, - { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "kubernetes.io/rule/nlb/mtu=aff4a46c59cd411e9a9c20a756cd94eb", - "from_port": 3, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "icmp", - "security_groups": [], - "self": false, - "to_port": 4 - }, - { - "cidr_blocks": [ - "10.0.0.0/16" - ], - "description": "kubernetes.io/rule/nlb/health=a0d6855d89cd511e9a9c20a756cd94eb", - "from_port": 31362, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31362 - }, - { - "cidr_blocks": [ - "10.0.0.0/16" - ], - "description": "kubernetes.io/rule/nlb/health=a0d6855d89cd511e9a9c20a756cd94eb", - "from_port": 31945, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31945 - }, - { - "cidr_blocks": [ - "10.0.0.0/16" - ], - "description": "kubernetes.io/rule/nlb/health=aff4a46c59cd411e9a9c20a756cd94eb", - "from_port": 31303, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31303 - }, - { - "cidr_blocks": [ - "10.0.0.0/16" - ], - "description": "kubernetes.io/rule/nlb/health=aff4a46c59cd411e9a9c20a756cd94eb", - "from_port": 31366, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [], - "self": false, - "to_port": 31366 - }, - { - "cidr_blocks": [], - "description": "", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_groups": [ - "sg-02c1a918ddf291a13", - "sg-099c00ee2d9fe36ef" - ], - "self": false, - "to_port": 0 - }, - { - "cidr_blocks": [], - "description": "Allow node to communicate with each other.", - "from_port": 0, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_groups": [], - "self": true, - "to_port": 0 - }, - { - "cidr_blocks": [], - "description": "Allow pods running extension API servers on port 443 to receive communication from cluster control plane.", - "from_port": 443, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [ - "sg-049b5a71010e17d3f" - ], - "self": false, - "to_port": 443 - }, - { - "cidr_blocks": [], - "description": "Allow workers pods to receive communication from the cluster control plane.", - "from_port": 1025, - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_groups": [ - "sg-049b5a71010e17d3f" - ], - "self": false, - "to_port": 65535 - } - ], - "name": "my-cluster20190702141950359500000006", - "name_prefix": "my-cluster", - "owner_id": "385595570414", - "revoke_rules_on_delete": false, - "tags": { - "Name": "my-cluster-eks_worker_sg", - "app": "tidb", - "kubernetes.io/cluster/my-cluster": "owned" - }, - "timeouts": null, - "vpc_id": "vpc-045ead8290dd948d4" - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=", - "depends_on": [ - "aws_eks_cluster.this" - ] - } - ] - }, - { - 
"module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "cluster_egress_internet", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "Allow cluster egress access to the Internet.", - "from_port": 0, - "id": "sgrule-3629728231", - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_group_id": "sg-049b5a71010e17d3f", - "self": false, - "source_security_group_id": null, - "to_port": 0, - "type": "egress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.cluster[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "cluster_https_worker_ingress", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [], - "description": "Allow pods to communicate with the EKS cluster API.", - "from_port": 443, - "id": "sgrule-105930882", - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_group_id": "sg-049b5a71010e17d3f", - "self": false, - "source_security_group_id": "sg-0901b76a8e3e7055e", - "to_port": 443, - "type": "ingress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.cluster[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "workers_egress_internet", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [ - "0.0.0.0/0" - ], - "description": "Allow nodes all egress to the Internet.", - "from_port": 0, - "id": "sgrule-2908448787", - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_group_id": "sg-0901b76a8e3e7055e", - "self": false, - "source_security_group_id": null, - "to_port": 0, - "type": "egress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.workers[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "workers_ingress_cluster", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [], - "description": "Allow workers pods to receive communication from the cluster control plane.", - "from_port": 1025, - "id": "sgrule-3437507016", - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_group_id": "sg-0901b76a8e3e7055e", - "self": false, - "source_security_group_id": "sg-049b5a71010e17d3f", - "to_port": 65535, - "type": "ingress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.workers[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "workers_ingress_cluster_https", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [], - "description": "Allow pods running extension API servers on port 443 to receive communication from cluster control plane.", - "from_port": 443, - "id": "sgrule-1750768348", - 
"ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "tcp", - "security_group_id": "sg-0901b76a8e3e7055e", - "self": false, - "source_security_group_id": "sg-049b5a71010e17d3f", - "to_port": 443, - "type": "ingress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.workers[0]" - ] - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "workers_ingress_cluster_kubelet", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "aws_security_group_rule", - "name": "workers_ingress_self", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 2, - "attributes": { - "cidr_blocks": [], - "description": "Allow node to communicate with each other.", - "from_port": 0, - "id": "sgrule-785489295", - "ipv6_cidr_blocks": [], - "prefix_list_ids": [], - "protocol": "-1", - "security_group_id": "sg-0901b76a8e3e7055e", - "self": false, - "source_security_group_id": "sg-0901b76a8e3e7055e", - "to_port": 0, - "type": "ingress" - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjIifQ==", - "depends_on": [ - "aws_security_group.workers[0]" - ] - } - ] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "database", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "elasticache", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "intra", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "private", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-08b8c355f30ed58ba", - "assign_ipv6_address_on_creation": false, - "availability_zone": "us-west-2a", - "availability_zone_id": "usw2-az2", - "cidr_block": "10.0.16.0/20", - "id": "subnet-08b8c355f30ed58ba", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "map_public_ip_on_launch": false, - "owner_id": "385595570414", - "tags": { - "Name": "my-cluster-private-us-west-2a", - "kubernetes.io/cluster/my-cluster": "shared" - }, - "timeouts": null, - "vpc_id": "vpc-045ead8290dd948d4" - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" - }, - { - "index_key": 1, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-0869b629c725c799c", - "assign_ipv6_address_on_creation": false, - "availability_zone": "us-west-2b", - "availability_zone_id": "usw2-az1", - "cidr_block": "10.0.32.0/20", - "id": "subnet-0869b629c725c799c", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "map_public_ip_on_launch": false, - "owner_id": "385595570414", - "tags": { - "Name": "my-cluster-private-us-west-2b", - "kubernetes.io/cluster/my-cluster": "shared" - }, - "timeouts": null, - "vpc_id": "vpc-045ead8290dd948d4" - }, - "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" - }, - { - "index_key": 2, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:subnet/subnet-01c62f5324fe05605", - "assign_ipv6_address_on_creation": false, - "availability_zone": "us-west-2c", - "availability_zone_id": "usw2-az3", - "cidr_block": "10.0.48.0/20", - "id": "subnet-01c62f5324fe05605", - "ipv6_cidr_block": "", - "ipv6_cidr_block_association_id": "", - "map_public_ip_on_launch": false, - "owner_id": "385595570414", - "tags": { - "Name": "my-cluster-private-us-west-2c", - "kubernetes.io/cluster/my-cluster": "shared" - }, - "timeouts": null, - "vpc_id": "vpc-045ead8290dd948d4" - }, - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6MTIwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9" - } - ] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_subnet", - "name": "redshift", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [ - { - "index_key": 0, - "schema_version": 1, - "attributes": { - "arn": "arn:aws:ec2:us-west-2:385595570414:vpc/vpc-045ead8290dd948d4", - "assign_generated_ipv6_cidr_block": false, - "cidr_block": "10.0.0.0/16", - "default_network_acl_id": "acl-01d834214145117e1", - "default_route_table_id": "rtb-00a392b6b0f316355", - "default_security_group_id": "sg-0c9dd6ab66e0d2227", - "dhcp_options_id": "dopt-0062aa79", - "enable_classiclink": false, - "enable_classiclink_dns_support": false, - "enable_dns_hostnames": false, - "enable_dns_support": true, - "id": "vpc-045ead8290dd948d4", - "instance_tenancy": "default", - "ipv6_association_id": "", - "ipv6_cidr_block": "", - "main_route_table_id": "rtb-00a392b6b0f316355", - "owner_id": "385595570414", - "tags": { - "Name": "my-cluster", - "kubernetes.io/cluster/my-cluster": "shared" - } - }, - "private": "eyJzY2hlbWFfdmVyc2lvbiI6IjEifQ==" - } - ] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_dhcp_options", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_dhcp_options_association", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "apigw", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "cloudtrail", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "dynamodb", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ec2", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ec2messages", - "each": "list", - "provider": "provider.aws", - 
"instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ecr_api", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ecr_dkr", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ecs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ecs_agent", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ecs_telemetry", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "elasticloadbalancing", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "events", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "kms", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "logs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "monitoring", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "s3", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "sns", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "sqs", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ssm", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint", - "name": "ssmmessages", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": "intra_dynamodb", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": "intra_s3", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": "private_dynamodb", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": "private_s3", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": 
"public_dynamodb", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_endpoint_route_table_association", - "name": "public_s3", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpc_ipv4_cidr_block_association", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpn_gateway", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpn_gateway_attachment", - "name": "this", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpn_gateway_route_propagation", - "name": "private", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.vpc", - "mode": "managed", - "type": "aws_vpn_gateway_route_propagation", - "name": "public", - "each": "list", - "provider": "provider.aws", - "instances": [] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "helm_release", - "name": "tidb-cluster", - "provider": "provider.helm.eks", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "helm_release", - "name": "tidb-cluster", - "provider": "provider.helm.eks", - "instances": [] - }, - { - "module": "module.tidb-operator", - "mode": "managed", - "type": "helm_release", - "name": "tidb-operator", - "provider": "module.tidb-operator.provider.helm", - "instances": [ - { - "schema_version": 0, - "attributes": { - "chart": "tidb-operator", - "devel": null, - "disable_webhooks": false, - "force_update": false, - "id": "tidb-operator", - "keyring": null, - "metadata": [ - { - "chart": "tidb-operator", - "name": "tidb-operator", - "namespace": "tidb-admin", - "revision": 1, - "values": "{}\n", - "version": "v1.0.0-beta.3" - } - ], - "name": "tidb-operator", - "namespace": "tidb-admin", - "recreate_pods": false, - "repository": "pingcap", - "reuse": false, - "reuse_values": false, - "set": [], - "set_sensitive": [], - "set_string": [], - "status": "DEPLOYED", - "timeout": 300, - "values": [ - "" - ], - "verify": false, - "version": "v1.0.0-beta.3", - "wait": true - } - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "local_file", - "name": "config_map_aws_auth", - "each": "list", - "provider": "provider.local", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "content": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", - "content_base64": null, - "filename": "credentials/config-map-aws-auth_my-cluster.yaml", - "id": "978ee48452ea948de06accdfb8813993dcee1a6b", - "sensitive_content": null - }, - "private": "bnVsbA==", - "depends_on": [ - "data.template_file.config_map_aws_auth" - ] - } - ] - }, - { - "module": "module.tidb-operator", - "mode": "managed", - "type": "local_file", - "name": "kubeconfig", - "provider": "provider.local", - "instances": [ - { - "schema_version": 0, - "attributes": { - "content": 
null, - "content_base64": null, - "filename": "credentials/kubeconfig_my-cluster", - "id": "94c5caf94ec9455972b0f8540c6aaa42617c43f8", - "sensitive_content": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n" - }, - "private": "bnVsbA==", - "depends_on": [ - "module.eks" - ] - } - ] - }, - { - "mode": "managed", - "type": "local_file", - "name": "kubeconfig", - "provider": "provider.local", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "local_file", - "name": "kubeconfig", - "each": "list", - "provider": "provider.local", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "content": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n", - "content_base64": null, - "filename": "credentials/kubeconfig_my-cluster", - "id": "94c5caf94ec9455972b0f8540c6aaa42617c43f8", - "sensitive_content": null - }, - "private": "bnVsbA==", - "depends_on": [ - "data.template_file.kubeconfig" - ] - } - ] - }, - { - "module": "module.key-pair", - "mode": "managed", - "type": "local_file", - "name": "private_key_pem", - "each": "list", - "provider": "provider.local", - "instances": [] - }, - { - "module": "module.key-pair", - "mode": "managed", - "type": "local_file", - "name": "public_key_openssh", - "each": "list", - "provider": "provider.local", - "instances": [] - }, - { - "module": "module.key-pair", - "mode": "managed", - "type": "null_resource", - "name": "chmod", - "each": "list", - "provider": "provider.null", - "instances": [] - }, - { - "module": "module.tidb-operator", - "mode": "managed", - "type": "null_resource", - "name": "setup-env", - "provider": "provider.null", - "instances": [ - { - "schema_version": 0, - "attributes": { - "id": "4147419460234023980", - "triggers": null - }, - "depends_on": [ - "local_file.kubeconfig" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "null_resource", - "name": "tags_as_list_of_maps", - "each": "list", - "provider": "provider.null", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "null_resource", - "name": "tags_as_list_of_maps", - "each": "list", - "provider": "provider.null", - "instances": [] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "null_resource", - "name": "tags_as_list_of_maps", - "each": "list", - "provider": "provider.null", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "8699955636061529248", - "triggers": { - "key": "app", - 
"propagate_at_launch": "true", - "value": "tidb" - } - } - } - ] - }, - { - "module": "module.tidb-operator.module.eks", - "mode": "managed", - "type": "null_resource", - "name": "update_config_map_aws_auth", - "each": "list", - "provider": "provider.null", - "instances": [ - { - "index_key": 0, - "schema_version": 0, - "attributes": { - "id": "7794443773218902071", - "triggers": { - "config_map_rendered": "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: aws-auth\n namespace: kube-system\ndata:\n mapRoles: |\n - rolearn: arn:aws:iam::385595570414:role/my-cluster20190702141950357100000005\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:bootstrappers\n - system:nodes\n\n\n mapUsers: |\n\n mapAccounts: |\n\n", - "endpoint": "https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com", - "kube_config_map_rendered": "apiVersion: v1\npreferences: {}\nkind: Config\n\nclusters:\n- cluster:\n server: https://2B09ED2329DF4012CC0A6705BFB14965.yl4.us-west-2.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EY3dNakUwTVRZMU9Wb1hEVEk1TURZeU9URTBNVFkxT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS0J4CmVKdlEzSldFbkZqckFBRVFVTDFUS0FTRURxaUt5QXlwd2JlN0UwVE5jQnBqR1kvUzFTZnRZMlZEOGtQbnFncjMKOHFuYXl3d1cvOERYdjEvN0t5OHdsU25GT2xuTjZZWk5vUm9jRFE2V1JNZ2Uzc3p5MFh3Ly9yWW5CQm8xeitFZQpHNnNVNzlXQjl4K1VKdWJNaWJ5NW9YYXNqUUNtZFVHOWkzVklMQTJHb1pGTnN0d1VOUTlTSUhaRlpIWkltVFRrCmdqcU11T3dxSGZJa3VDNlNDREUwTnhtSUVrWU8wb1J6WVdISCt6OEsxQjFhMGF2c1p4NzJGZjJ1UE5oTlE1RU0KQy95djFmL3daTDE2RHJBMURUTFJSWFlnbXQyL2lvQVRWRFpQdWhJQzkzWHNZMHI1blYrd2xWQml0NVZvYTJ5NAp6dXphdWhEM1gzMTBrZVBhUXVjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTDBObG1CRysrdjl3bjd0eHpiaGxoeWVOM2MKbUE3NTNUSTBROWlGenJ3SERkV3J3TnMrbW9XSzlRbUJhOG1YTmhyZUpIYXpPUWt0cUNHQkM4OFZkeXhHZE9PZwo2SWVQSnA2NW83YzRYSXdTY1lkai9wWXpLeHZXaS82Zi9aa000TzAxQUpUZkVaWWVEbmpmaXZkaGtzdkd2VVFuCjI5TDZmQkg2dXh5a3k0cVJraW9kTkppMHVIazdaNUVYNStMciszNzhBeHBKOEJVMFpwbk1sNVhNUFR1bzE5VysKR3B5UElVQXBUamtQdC9NemQ4czdyWDJmQWRaYW9vUUNvNEw4dkpGWHdudDBHUUpzL3c5S2tiYXcxY00xcXRZTQpFa3hDQno5M0l1OVhyUFVoMG9tdXVyR2d5akRLdUJoTkZCby8wOGMvZmx6UVlSVGRCRmpXNTY5c3VVQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: eks_my-cluster\n\ncontexts:\n- context:\n cluster: eks_my-cluster\n user: eks_my-cluster\n name: eks_my-cluster\n\ncurrent-context: eks_my-cluster\n\nusers:\n- name: eks_my-cluster\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"my-cluster\"\n\n\n" - } - }, - "depends_on": [ - "aws_eks_cluster.this", - "data.template_file.config_map_aws_auth", - "data.template_file.kubeconfig" - ] - } - ] - }, - { - "module": "module.default-cluster", - "mode": "managed", - "type": "null_resource", - "name": "wait-tidb-ready", - "provider": "provider.null", - "instances": [] - }, - { - "module": "module.test-cluster", - "mode": "managed", - "type": "null_resource", - "name": "wait-tidb-ready", - "provider": "provider.null", - "instances": [] - }, - { - "module": "module.key-pair", - "mode": "managed", - "type": "tls_private_key", - "name": "generated", - "provider": "provider.tls", - "instances": [ - { - "schema_version": 0, - "attributes": { - "algorithm": "RSA", - "ecdsa_curve": "P224", - "id": "9d6496bbd72df74a70b397d78f726756ce87e9d1", - 
"private_key_pem": "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAssPgwsgg48MSHkeo6AiSKZWgq8q4lO61+Y5jqotixxgr62og\nrYX2cNxEAXPZLYCE12ejgi3Ed5juoLm+ZhM5EWUBVJBeA5IlMQf/HprqniYEu54K\nuV7wwEaqgjU9Tdnv3DqfiyHjOLwqX22UHGczs6fO00hhdcfDpMqg5qgltcJNHPfE\nG2Yh9gSWi8TvYZq31UwHlED4mX7Z9utC6ptMhDWLonbNDaVg+LrLpRN+rLETccsW\njTD0QNO71WtSIRvBszMCpOWAAkSYXbWUBFxThaNIzbha0exV/g4PFDd2L8VAEySD\nK9lHjM1iI6Ea1wXdSxB+iVsctVdqMK1j4uBOTwIDAQABAoIBAFjF3vq7aWHRwFCb\nI2y+vN67uEM3c3w6ItIf/Kx3dYELJtxMIpgpO12GdJsIDaSD+vQBOnhuA++kWXQl\naUDFcQSLVSLKYnWBgMidgPqQ0cvhc148OHUfiYziStBIYf4kKPIDhrEQDgdhoeUr\nxG5qbYlc3t+bRRK5NhXCri589+UPGKjYD+Qzjm7VKlih+OOgRDLkD4nu+BDBlsVJ\ntaWsfZMjsKUp35sV1fvDMiap+FsI6i/CBZpf4eCnMXDQt5XnBrXrUeDxf/aZw1g9\n+niwzyjJCNTv6omA1YpJYA4p6WftcSSJWk7GvnelTwyQlXgjm6ZbM7BkM+iJkllm\nZlEOT4ECgYEA3Q+Y16DGazwy5C5XPCOTPNF3J91ib/ko0FfmKLzlEj/+jtIL2+o6\ndzd6ezvGU9c23Qp4FiILHVumxL8O+OYdGmOkhx3SqxbtqDODUHJYXumFUv0Jb3BH\nOjQgzCq/fUOcAYHbjdPIRhnUZ4yRoYDnerGPGVO2RyUMpf7AIDtlPI8CgYEAzwTw\nyyQD7g1uKptQ4f5IG/XqG9mmHFQ2Gu9uO3bvhZYtQEhArfh7OgnskK2L0fKcbZIX\nF1JBgJW8bNV6/9P//mts+Y4cValuqZtCmJjPOw2nX/u0X+/eJRHavfbFNlmACaMd\nBx7HGDHR6yZp9Of0TbUt56m7TBlSifaISk4oMkECgYAETrJ+uR5EpqajNZfzjwnm\nbHpy52hsoCFAdgYBEzUvdtnB9KvQfC7pdcZIMnD53z6tbe/LFpy61LdaLBLhnLJC\nemCRVW5ucQLufRp47dF0//3eERom9rwckTl2YPrcOP4INXyOteq4Gva9kcqgp/9a\nr60HJE9v8XPepCkgN6gQVwKBgFYAfXBG5AMPPUciAvX/x0EmZj1vq9x09401DpxR\niqv6eY4M9iHP6pFv8gEgt8defLHgUQt1NpUOn5qvDUwebGjrg/ggm5DStJBtWbs/\nMEgeIfxz+rkoUycfRbpJPCCaCeD3DGYa2Scp+0UvTjFZ81oc/JcTIiY5FtsNugz0\nbyqBAoGAe5+S7sO613qrSp1iL3Es9HoMnPniwZAmGyhhSG5bFCThKlKolR2XwzV0\n/bz78tdz9KAzEfalAwBt2b394nNttKYqQSwJYpDVWAgzycRDgvjOR5J4pAapS4sF\nH2R2SvIcXGxAOMp3eR8Ysq/Q06kKnGxOapt2gyDU3yJxjaE5PrQ=\n-----END RSA PRIVATE KEY-----\n", - "public_key_fingerprint_md5": "52:e4:05:eb:0f:b4:aa:34:46:27:1a:21:63:40:f5:39", - "public_key_openssh": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyw+DCyCDjwxIeR6joCJIplaCryriU7rX5jmOqi2LHGCvraiCthfZw3EQBc9ktgITXZ6OCLcR3mO6gub5mEzkRZQFUkF4DkiUxB/8emuqeJgS7ngq5XvDARqqCNT1N2e/cOp+LIeM4vCpfbZQcZzOzp87TSGF1x8OkyqDmqCW1wk0c98QbZiH2BJaLxO9hmrfVTAeUQPiZftn260Lqm0yENYuids0NpWD4usulE36ssRNxyxaNMPRA07vVa1IhG8GzMwKk5YACRJhdtZQEXFOFo0jNuFrR7FX+Dg8UN3YvxUATJIMr2UeMzWIjoRrXBd1LEH6JWxy1V2owrWPi4E5P\n", - "public_key_pem": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAssPgwsgg48MSHkeo6AiS\nKZWgq8q4lO61+Y5jqotixxgr62ogrYX2cNxEAXPZLYCE12ejgi3Ed5juoLm+ZhM5\nEWUBVJBeA5IlMQf/HprqniYEu54KuV7wwEaqgjU9Tdnv3DqfiyHjOLwqX22UHGcz\ns6fO00hhdcfDpMqg5qgltcJNHPfEG2Yh9gSWi8TvYZq31UwHlED4mX7Z9utC6ptM\nhDWLonbNDaVg+LrLpRN+rLETccsWjTD0QNO71WtSIRvBszMCpOWAAkSYXbWUBFxT\nhaNIzbha0exV/g4PFDd2L8VAEySDK9lHjM1iI6Ea1wXdSxB+iVsctVdqMK1j4uBO\nTwIDAQAB\n-----END PUBLIC KEY-----\n", - "rsa_bits": 2048 - } - } - ] - } - ] -} From 2e63e5581d5d97eed45b38c2a283eaa8fcf30fae Mon Sep 17 00:00:00 2001 From: Aylei Date: Wed, 3 Jul 2019 09:15:31 +0800 Subject: [PATCH 10/11] Address review comments Signed-off-by: Aylei --- deploy/aws/README.md | 2 +- deploy/aws/aws-tutorial.tfvars | 2 +- deploy/aws/tidb-cluster/local.tf | 4 ++-- .../aws/tidb-operator/manifests/local-volume-provisioner.yaml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/aws/README.md b/deploy/aws/README.md index 87357b294a..d78ac475ce 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -178,7 +178,7 @@ variable "operator_values" { } ``` -## Multiple Cluster Management +## Multiple TiDB Cluster Management An instance of `./tidb-cluster` module corresponds to a TiDB cluster in the EKS cluster. 
 If you want to add a new TiDB cluster, you can edit `./cluster.tf` and add a new instance of `./tidb-cluster` module:
diff --git a/deploy/aws/aws-tutorial.tfvars b/deploy/aws/aws-tutorial.tfvars
index 6326ec3e03..6f71319587 100644
--- a/deploy/aws/aws-tutorial.tfvars
+++ b/deploy/aws/aws-tutorial.tfvars
@@ -7,4 +7,4 @@ default_cluster_pd_count = 1
 default_cluster_tikv_count = 1
 default_cluster_tidb_count = 1
 
-default_cluster_cluster_name = "aws_tutorial"
+default_cluster_cluster_name = "aws-tutorial"
diff --git a/deploy/aws/tidb-cluster/local.tf b/deploy/aws/tidb-cluster/local.tf
index 169b47cbd9..9bacd33a45 100644
--- a/deploy/aws/tidb-cluster/local.tf
+++ b/deploy/aws/tidb-cluster/local.tf
@@ -42,7 +42,7 @@ locals {
       instance_type        = var.pd_instance_type
       root_volume_size     = "50"
       public_ip            = true
-      kubelet_extra_args   = "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule --node-labels=dedicated=${var.cluster_name}-pd,localVolume=true"
+      kubelet_extra_args   = "--register-with-taints=dedicated=${var.cluster_name}-pd:NoSchedule --node-labels=dedicated=${var.cluster_name}-pd,pingcap.com/aws-local-ssd=true"
       asg_desired_capacity = var.pd_count
       asg_max_size         = var.pd_count + 2
       # additional_userdata  = file("userdata.sh")
@@ -53,7 +53,7 @@ locals {
       instance_type        = var.tikv_instance_type
       root_volume_size     = "50"
       public_ip            = false
-      kubelet_extra_args   = "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule --node-labels=dedicated=${var.cluster_name}-tikv,localVolume=true"
+      kubelet_extra_args   = "--register-with-taints=dedicated=${var.cluster_name}-tikv:NoSchedule --node-labels=dedicated=${var.cluster_name}-tikv,pingcap.com/aws-local-ssd=true"
       asg_desired_capacity = var.tikv_count
       asg_max_size         = var.tikv_count + 2
       pre_userdata         = file("${path.module}/pre_userdata")
diff --git a/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml b/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml
index ebbd1bf11e..b8bc32f713 100644
--- a/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml
+++ b/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml
@@ -39,7 +39,7 @@ spec:
           operator: Exists
           effect: "NoSchedule"
       nodeSelector:
-        localVolume: "true"
+        pingcap.com/aws-local-ssd: "true"
       serviceAccountName: local-storage-admin
       containers:
       - image: "quay.io/external_storage/local-volume-provisioner:v2.2.0"

From e06b32668a7bbfc05bc730e498764329b61be206 Mon Sep 17 00:00:00 2001
From: Aylei
Date: Wed, 3 Jul 2019 09:52:02 +0800
Subject: [PATCH 11/11] Document module reusing

Signed-off-by: Aylei
---
 deploy/aws/README.md | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/deploy/aws/README.md b/deploy/aws/README.md
index d78ac475ce..5068e0ec94 100644
--- a/deploy/aws/README.md
+++ b/deploy/aws/README.md
@@ -260,3 +260,28 @@ $ terraform destroy
 > **Note:**
 >
 > You have to manually delete the EBS volumes in AWS console after running terraform destroy if you do not need the data on the volumes anymore.
+
+## Advanced Guide: Use the tidb-cluster and tidb-operator Modules
+
+Under the hood, this terraform module composes two sub-modules:
+
+- [tidb-operator](./tidb-operator/README.md), which provisions the Kubernetes control plane for a TiDB cluster
+- [tidb-cluster](./tidb-cluster/README.md), which provisions a TiDB cluster in the target Kubernetes cluster
+
+You can use these modules separately in your own terraform scripts, by either referencing them locally or publishing them to your terraform module registry.
+
+For example, suppose you create a terraform module in `/deploy/aws/staging`; you can reference the tidb-operator and tidb-cluster modules as follows:
+
+```hcl
+module "setup-control-plane" {
+  source = "../tidb-operator"
+}
+
+module "tidb-cluster-a" {
+  source = "../tidb-cluster"
+}
+
+module "tidb-cluster-b" {
+  source = "../tidb-cluster"
+}
+```
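The "Multiple TiDB Cluster Management" text updated in PATCH 10 tells readers to add another instance of the `./tidb-cluster` module in `./cluster.tf`, but the example itself falls outside the patch context. Below is a minimal sketch of such an instance. The variable names `cluster_name`, `pd_count`, `pd_instance_type`, `tikv_count`, and `tikv_instance_type` are taken from the `var.` references visible in the `tidb-cluster/local.tf` hunks above; the module label, the values, and any additional wiring the real module interface requires (subnets, EKS outputs, or Helm provider aliases such as the `provider.helm.eks` alias that appears in the removed state) are assumptions for illustration only.

```hcl
# Hypothetical addition to the `./cluster.tf` referenced in the README, alongside
# the existing "default-cluster" and "test-cluster" module instances: one more
# TiDB cluster managed by the same EKS control plane.
module "staging-cluster" {
  source = "./tidb-cluster"

  # Only variables actually referenced in tidb-cluster/local.tf are set here;
  # the values are illustrative, not recommendations.
  cluster_name       = "staging"
  pd_count           = 3
  pd_instance_type   = "m5.xlarge"
  tikv_count         = 3
  tikv_instance_type = "c5d.4xlarge"
}
```

As with `default-cluster` and `test-cluster`, whatever provider or control-plane arguments the module actually declares would need to be passed the same way those existing blocks do.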