Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: Disable creation of cluster security group rules that map to node security group when create_node_security_group = false #2274

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions docs/faq.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ By default, EKS creates a cluster primary security group that is created outside
attach_cluster_primary_security_group = true # default is false

node_security_group_tags = {
"kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
"kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
}
```

Expand All @@ -36,7 +36,7 @@ By default, EKS creates a cluster primary security group that is created outside
attach_cluster_primary_security_group = true # default is false

cluster_tags = {
"kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
"kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
}
```

Expand Down
10 changes: 9 additions & 1 deletion examples/fargate_profile/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,16 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.7 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |

## Providers

| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.7 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |

## Modules

Expand All @@ -41,6 +44,11 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |

## Inputs

Expand Down
195 changes: 150 additions & 45 deletions examples/fargate_profile/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,20 @@ provider "aws" {
region = local.region
}

# Helm provider authenticated against the EKS cluster created by the "eks"
# module below. Credentials are obtained at apply time through an exec
# plugin (aws eks get-token) rather than a static token, so they cannot
# expire partway through a long apply.
provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      # This requires the awscli to be installed locally where Terraform is executed
      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
    }
  }
}

locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.22"
Expand All @@ -27,14 +41,8 @@ module "eks" {
cluster_endpoint_public_access = true

cluster_addons = {
# Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
coredns = {
resolve_conflicts = "OVERWRITE"
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
}
vpc-cni = {}
}

cluster_encryption_config = [{
Expand All @@ -45,28 +53,13 @@ module "eks" {
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

# You require a node group to schedule coredns which is critical for running correctly internal DNS.
# If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
# available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
eks_managed_node_groups = {
example = {
desired_size = 1

instance_types = ["t3.large"]
labels = {
Example = "managed_node_groups"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
tags = {
ExtraTag = "example"
}
}
}
# Fargate profiles use the cluster primary security group so these are not utilized
create_cluster_security_group = false
create_node_security_group = false

fargate_profiles = {
default = {
name = "default"
example = {
name = "example"
selectors = [
{
namespace = "backend"
Expand All @@ -75,15 +68,18 @@ module "eks" {
}
},
{
namespace = "default"
namespace = "app-*"
labels = {
WorkerType = "fargate"
Application = "app-wildcard"
}
}
]

# Using specific subnets instead of the subnets supplied for the cluster itself
subnet_ids = [module.vpc.private_subnets[1]]

tags = {
Owner = "default"
Owner = "secondary"
}

timeouts = {
Expand All @@ -92,29 +88,138 @@ module "eks" {
}
}

secondary = {
name = "secondary"
kube_system = {
name = "kube-system"
selectors = [
{
namespace = "default"
labels = {
Environment = "test"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
}
{ namespace = "kube-system" }
]
}
}

# Using specific subnets instead of the subnets supplied for the cluster itself
subnet_ids = [module.vpc.private_subnets[1]]
tags = local.tags
}

tags = {
Owner = "secondary"
################################################################################
# Modify EKS CoreDNS Deployment
################################################################################

# Short-lived authentication token for the cluster; consumed by the
# kubeconfig rendered in the locals block used by the local-exec provisioners.
data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

locals {
  # Minimal single-user kubeconfig for the cluster, rendered as YAML.
  # It is passed base64-encoded to the local-exec provisioners below via
  # the KUBECONFIG environment variable and decoded inline by kubectl.
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        # Ephemeral token from aws_eks_cluster_auth; sufficient for the
        # one-shot kubectl commands run during apply.
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}

# Separate resource so that this is only ever executed once
resource "null_resource" "remove_default_coredns_deployment" {
  # Empty triggers: the provisioner runs once at creation and is never
  # re-run on subsequent applies.
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      # base64-encoded so the multi-line YAML survives env-var transport;
      # decoded inline via process substitution in the command below.
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }
}

tags = local.tags
# Annotates and labels the existing kube-dns Service (meta.helm.sh/release-*,
# app.kubernetes.io/managed-by=Helm) so the CoreDNS Helm release below can
# adopt it instead of failing on an unowned resource.
resource "null_resource" "modify_kube_dns" {
  # Empty triggers: runs once at creation, never re-run on later applies.
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }

  depends_on = [
    null_resource.remove_default_coredns_deployment
  ]
}

################################################################################
# CoreDNS Helm Chart (self-managed)
################################################################################

# Looks up the most recent CoreDNS EKS add-on version compatible with the
# cluster's Kubernetes version; used below to pin the container image tag.
data "aws_eks_addon_version" "this" {
  # for_each over a single-element set keys the data source by addon name,
  # which allows additional addons to be added to the set later.
  for_each = toset(["coredns"])

  addon_name         = each.value
  kubernetes_version = module.eks.cluster_version
  most_recent        = true
}

# Self-managed CoreDNS deployed via Helm into kube-system, replacing the
# EKS-managed deployment removed above. The compute-type annotations steer
# the pods onto the Fargate profile covering the kube-system namespace.
resource "helm_release" "coredns" {
  name             = "coredns"
  namespace        = "kube-system"
  create_namespace = false
  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
  chart            = "coredns"
  version          = "1.19.4"
  repository       = "https://coredns.github.io/helm"

  # For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
  # NOTE(review): the ECR registry below (602401143452 / eu-west-1) is
  # region-specific — confirm it matches the region this example deploys to.
  values = [
    <<-EOT
      image:
        repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
        tag: ${data.aws_eks_addon_version.this["coredns"].version}
      deployment:
        name: coredns
        annotations:
          eks.amazonaws.com/compute-type: fargate
      service:
        name: kube-dns
        annotations:
          eks.amazonaws.com/compute-type: fargate
      podAnnotations:
        eks.amazonaws.com/compute-type: fargate
    EOT
  ]

  depends_on = [
    # Need to ensure the CoreDNS updates are performed before provisioning
    null_resource.modify_kube_dns
  ]
}

################################################################################
Expand Down
10 changes: 7 additions & 3 deletions examples/fargate_profile/versions.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,13 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.72"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
helm = {
source = "hashicorp/helm"
version = ">= 2.7"
}
null = {
source = "hashicorp/null"
version = ">= 3.0"
}
}
}
5 changes: 3 additions & 2 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,8 @@ locals {

cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id

cluster_security_group_rules = {
# Do not add rules to node security group if the module is not creating it
cluster_security_group_rules = local.create_node_sg ? {
ingress_nodes_443 = {
description = "Node groups to cluster API"
protocol = "tcp"
Expand All @@ -153,7 +154,7 @@ locals {
type = "egress"
source_node_security_group = true
}
}
} : {}
}

resource "aws_security_group" "cluster" {
Expand Down