From 28ccecefe22d81a3a7febbbc3efc17c6590f88e1 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 14 Oct 2022 09:16:57 -0400
Subject: [PATCH] fix: Disable creation of cluster security group rules that map to node security group when `create_node_security_group` = `false` (#2274)

* fix: Disable creation of cluster security group rules that map to node security group when `create_node_security_group` = `false`

* feat: Update Fargate example to run only Fargate and show disabling of both cluster and node security groups

* fix: Ensure CoreDNS changes are made ahead of install
---
 docs/faq.md                          |   4 +-
 examples/fargate_profile/README.md   |  10 +-
 examples/fargate_profile/main.tf     | 195 ++++++++++++++++++++-------
 examples/fargate_profile/versions.tf |  10 +-
 main.tf                              |   5 +-
 5 files changed, 171 insertions(+), 53 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 4a73b7a8d7..c1ab564ee0 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -26,7 +26,7 @@ By default, EKS creates a cluster primary security group that is created outside
   attach_cluster_primary_security_group = true # default is false
 
   node_security_group_tags = {
-    "kubernetes.io/cluster/" = "" # or any other value other than "owned"
+    "kubernetes.io/cluster/" = null # or any other value other than "owned"
   }
 ```
 
@@ -36,7 +36,7 @@ By default, EKS creates a cluster primary security group that is created outside
   attach_cluster_primary_security_group = true # default is false
 
   cluster_tags = {
-    "kubernetes.io/cluster/" = "" # or any other value other than "owned"
+    "kubernetes.io/cluster/" = null # or any other value other than "owned"
   }
 ```
diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md
index d8a1bfecf3..dddbc5755e 100644
--- a/examples/fargate_profile/README.md
+++ b/examples/fargate_profile/README.md
@@ -21,13 +21,16 @@ Note that this example may create resources which cost money. Run `terraform des
 |------|---------|
 | [terraform](#requirement\_terraform) | >= 0.13.1 |
 | [aws](#requirement\_aws) | >= 3.72 |
-| [kubernetes](#requirement\_kubernetes) | >= 2.10 |
+| [helm](#requirement\_helm) | >= 2.7 |
+| [null](#requirement\_null) | >= 3.0 |
 
 ## Providers
 
 | Name | Version |
 |------|---------|
 | [aws](#provider\_aws) | >= 3.72 |
+| [helm](#provider\_helm) | >= 2.7 |
+| [null](#provider\_null) | >= 3.0 |
 
 ## Modules
 
@@ -41,6 +44,11 @@ Note that this example may create resources which cost money. Run `terraform des
 | Name | Type |
 |------|------|
 | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
 
 ## Inputs
 
diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf
index 6c80e22c41..a7b3f8f903 100644
--- a/examples/fargate_profile/main.tf
+++ b/examples/fargate_profile/main.tf
@@ -2,6 +2,20 @@ provider "aws" {
   region = local.region
 }
 
+provider "helm" {
+  kubernetes {
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+    exec {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      # This requires the awscli to be installed locally where Terraform is executed
+      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
+    }
+  }
+}
+
 locals {
   name            = "ex-${replace(basename(path.cwd), "_", "-")}"
   cluster_version = "1.22"
@@ -27,14 +41,8 @@ module "eks" {
   cluster_endpoint_public_access = true
 
   cluster_addons = {
-    # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
-    coredns = {
-      resolve_conflicts = "OVERWRITE"
-    }
     kube-proxy = {}
-    vpc-cni = {
-      resolve_conflicts = "OVERWRITE"
-    }
+    vpc-cni    = {}
   }
 
   cluster_encryption_config = [{
@@ -45,28 +53,13 @@
   vpc_id     = module.vpc.vpc_id
   subnet_ids = module.vpc.private_subnets
 
-  # You require a node group to schedule coredns which is critical for running correctly internal DNS.
-  # If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
-  # available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
-  eks_managed_node_groups = {
-    example = {
-      desired_size = 1
-
-      instance_types = ["t3.large"]
-      labels = {
-        Example    = "managed_node_groups"
-        GithubRepo = "terraform-aws-eks"
-        GithubOrg  = "terraform-aws-modules"
-      }
-      tags = {
-        ExtraTag = "example"
-      }
-    }
-  }
+  # Fargate profiles use the cluster primary security group so these are not utilized
+  create_cluster_security_group = false
+  create_node_security_group    = false
 
   fargate_profiles = {
-    default = {
-      name = "default"
+    example = {
+      name = "example"
       selectors = [
         {
           namespace = "backend"
           labels = {
             Application = "backend"
           }
         },
         {
-          namespace = "default"
+          namespace = "app-*"
           labels = {
-            WorkerType = "fargate"
+            Application = "app-wildcard"
           }
         }
       ]
 
+      # Using specific subnets instead of the subnets supplied for the cluster itself
+      subnet_ids = [module.vpc.private_subnets[1]]
+
       tags = {
-        Owner = "default"
+        Owner = "secondary"
       }
 
       timeouts = {
@@ -92,29 +88,138 @@
       }
     }
 
-    secondary = {
-      name = "secondary"
-      selectors = [
-        {
-          namespace = "default"
-          labels = {
-            Environment = "test"
-            GithubRepo  = "terraform-aws-eks"
-            GithubOrg   = "terraform-aws-modules"
-          }
-        }
-      ]
-
-      # Using specific subnets instead of the subnets supplied for the cluster itself
-      subnet_ids = [module.vpc.private_subnets[1]]
-
-      tags = {
-        Owner = "secondary"
-      }
-    }
-  }
+    kube_system = {
+      name = "kube-system"
+      selectors = [
+        { namespace = "kube-system" }
+      ]
+    }
+  }
 
   tags = local.tags
 }
 
+################################################################################
+# Modify EKS CoreDNS Deployment
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+
+locals {
+  kubeconfig = yamlencode({
+    apiVersion      = "v1"
+    kind            = "Config"
+    current-context = "terraform"
+    clusters = [{
+      name = module.eks.cluster_id
+      cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server                     = module.eks.cluster_endpoint
+      }
+    }]
+    contexts = [{
+      name = "terraform"
+      context = {
+        cluster = module.eks.cluster_id
+        user    = "terraform"
+      }
+    }]
+    users = [{
+      name = "terraform"
+      user = {
+        token = data.aws_eks_cluster_auth.this.token
+      }
+    }]
+  })
+}
+
+# Separate resource so that this is only ever executed once
+resource "null_resource" "remove_default_coredns_deployment" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
+    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+}
+
+resource "null_resource" "modify_kube_dns" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+
+  depends_on = [
+    null_resource.remove_default_coredns_deployment
+  ]
+}
+
+################################################################################
+# CoreDNS Helm Chart (self-managed)
+################################################################################
+
+data "aws_eks_addon_version" "this" {
+  for_each = toset(["coredns"])
+
+  addon_name         = each.value
+  kubernetes_version = module.eks.cluster_version
+  most_recent        = true
+}
+
+resource "helm_release" "coredns" {
+  name             = "coredns"
+  namespace        = "kube-system"
+  create_namespace = false
+  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
+  chart            = "coredns"
+  version          = "1.19.4"
+  repository       = "https://coredns.github.io/helm"
+
+  # For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
+  values = [
+    <<-EOT
+      image:
+        repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
+        tag: ${data.aws_eks_addon_version.this["coredns"].version}
+      deployment:
+        name: coredns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      service:
+        name: kube-dns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      podAnnotations:
+        eks.amazonaws.com/compute-type: fargate
+    EOT
+  ]
+
+  depends_on = [
+    # Need to ensure the CoreDNS updates are performed before provisioning
+    null_resource.modify_kube_dns
+  ]
+}
+
 ################################################################################
diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf
index 6d6dc45be6..5128bc99e3 100644
--- a/examples/fargate_profile/versions.tf
+++ b/examples/fargate_profile/versions.tf
@@ -6,9 +6,13 @@ terraform {
       source  = "hashicorp/aws"
       version = ">= 3.72"
     }
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = ">= 2.10"
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.7"
+    }
+    null = {
+      source  = "hashicorp/null"
+      version = ">= 3.0"
     }
   }
 }
diff --git a/main.tf b/main.tf
index 6de17d5cf1..a17d4a1499 100644
--- a/main.tf
+++ b/main.tf
@@ -128,7 +128,8 @@ locals {
 
   cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id
 
-  cluster_security_group_rules = {
+  # Do not add rules to node security group if the module is not creating it
+  cluster_security_group_rules = local.create_node_sg ? {
     ingress_nodes_443 = {
       description                = "Node groups to cluster API"
       protocol                   = "tcp"
@@ -153,7 +154,7 @@
       type                       = "egress"
       source_node_security_group = true
     }
-  }
+  } : {}
 }
 
 resource "aws_security_group" "cluster" {
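
Usage sketch (not part of the upstream patch): a minimal example of how a consumer of the module might exercise this fix on a Fargate-only cluster, disabling both the cluster and node security groups so that no rules referencing a non-existent node security group are created. The registry source address, cluster name, and VPC/subnet IDs below are illustrative placeholders rather than values taken from the patch.

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "example"  # placeholder
  cluster_version = "1.22"

  # With this fix, disabling the node security group also skips the
  # cluster security group rules that would otherwise reference it
  create_cluster_security_group = false
  create_node_security_group    = false

  vpc_id     = "vpc-012345abcdef"                      # placeholder
  subnet_ids = ["subnet-0123abcd", "subnet-4567efgh"]  # placeholders

  fargate_profiles = {
    kube_system = {
      name      = "kube-system"
      selectors = [{ namespace = "kube-system" }]
    }
  }
}
```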