diff --git a/README.md b/README.md
index a1967e46c8..80846bc9f0 100644
--- a/README.md
+++ b/README.md
@@ -200,6 +200,89 @@ module "eks" {
}
```
+### IRSA Integration
+
+An [IAM role for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) module has been created to work in conjunction with the EKS module. The [`iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) module has a set of pre-defined IAM policies for common addons/controllers/custom resources to allow users to quickly enable common integrations. Check [`policies.tf`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf) for a list of the policies currently supported. An example of this integration is shown below, and more can be found in the [`iam-role-for-service-accounts` example directory](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/examples/iam-role-for-service-accounts-eks/main.tf):
+
+```hcl
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+
+ cluster_name = "example"
+ cluster_version = "1.21"
+
+ cluster_addons = {
+ vpc-cni = {
+ resolve_conflicts = "OVERWRITE"
+ service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
+ }
+ }
+
+ vpc_id = "vpc-1234556abcdef"
+ subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
+
+ eks_managed_node_group_defaults = {
+ # We are using the IRSA created below for permissions
+ # This is also a better practice: the nodes themselves do not receive the
+ # permission; only the VPC CNI addon does
+ iam_role_attach_cni_policy = false
+ }
+
+ eks_managed_node_groups = {
+ default = {}
+ }
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+
+module "vpc_cni_irsa" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+
+ role_name = "vpc_cni"
+ attach_vpc_cni_policy = true
+ vpc_cni_enable_ipv4 = true
+
+ oidc_providers = {
+ main = {
+ provider_arn = module.eks.oidc_provider_arn
+ namespace_service_accounts = ["kube-system:aws-node"]
+ }
+ }
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+
+module "karpenter_irsa" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+
+ role_name = "karpenter_controller"
+ attach_karpenter_controller_policy = true
+
+ karpenter_controller_cluster_ids = [module.eks.cluster_id]
+ karpenter_controller_node_iam_role_arns = [
+ module.eks.eks_managed_node_groups["default"].iam_role_arn
+ ]
+
+ oidc_providers = {
+ main = {
+ provider_arn = module.eks.oidc_provider_arn
+ namespace_service_accounts = ["karpenter:karpenter"]
+ }
+ }
+
+ tags = {
+ Environment = "dev"
+ Terraform = "true"
+ }
+}
+```
+
## Node Group Configuration
⚠️ The configurations shown below are referenced from within the root EKS module; there will be slight differences in the default values provided when compared to the underlying sub-modules (`eks-managed-node-group`, `self-managed-node-group`, and `fargate-profile`).
diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md
index 61e793615b..769a9cbd30 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/examples/eks_managed_node_group/README.md
@@ -75,6 +75,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|--------|---------|
| [eks](#module\_eks) | ../.. | n/a |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 |
## Resources
diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index f985b49c57..3ef80700b8 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -29,8 +29,7 @@ module "eks" {
cluster_endpoint_public_access = true
# IPV6
- cluster_ip_family = "ipv6"
- create_cni_ipv6_iam_policy = true
+ cluster_ip_family = "ipv6"
cluster_addons = {
coredns = {
@@ -38,7 +37,8 @@ module "eks" {
}
kube-proxy = {}
vpc-cni = {
- resolve_conflicts = "OVERWRITE"
+ resolve_conflicts = "OVERWRITE"
+ service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
}
}
@@ -87,6 +87,9 @@ module "eks" {
ami_type = "AL2_x86_64"
disk_size = 50
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+
+ # We are using the IRSA created below for permissions
+ iam_role_attach_cni_policy = false
}
eks_managed_node_groups = {
@@ -421,6 +424,24 @@ module "vpc" {
tags = local.tags
}
+module "vpc_cni_irsa" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+ version = "~> 4.12"
+
+ role_name_prefix = "VPC-CNI-IRSA"
+ attach_vpc_cni_policy = true
+ vpc_cni_enable_ipv6 = true
+
+ oidc_providers = {
+ main = {
+ provider_arn = module.eks.oidc_provider_arn
+ namespace_service_accounts = ["kube-system:aws-node"]
+ }
+ }
+
+ tags = local.tags
+}
+
resource "aws_security_group" "additional" {
name_prefix = "${local.name}-additional"
vpc_id = module.vpc.vpc_id
diff --git a/examples/irsa_autoscale_refresh/README.md b/examples/irsa_autoscale_refresh/README.md
index ac65d52e29..c30a6f6b41 100644
--- a/examples/irsa_autoscale_refresh/README.md
+++ b/examples/irsa_autoscale_refresh/README.md
@@ -40,10 +40,10 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Source | Version |
|------|--------|---------|
-| [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
+| [cluster\_autoscaler\_irsa](#module\_cluster\_autoscaler\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 |
| [eks](#module\_eks) | ../.. | n/a |
-| [iam\_assumable\_role\_cluster\_autoscaler](#module\_iam\_assumable\_role\_cluster\_autoscaler) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
+| [node\_termination\_handler\_irsa](#module\_node\_termination\_handler\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 |
| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
@@ -55,17 +55,13 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
-| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.aws_node_termination_handler_sqs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
diff --git a/examples/irsa_autoscale_refresh/charts.tf b/examples/irsa_autoscale_refresh/charts.tf
index 9d3c1d3d89..956b8de9f4 100644
--- a/examples/irsa_autoscale_refresh/charts.tf
+++ b/examples/irsa_autoscale_refresh/charts.tf
@@ -32,7 +32,7 @@ resource "helm_release" "cluster_autoscaler" {
set {
name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.iam_assumable_role_cluster_autoscaler.iam_role_arn
+ value = module.cluster_autoscaler_irsa.iam_role_arn
type = "string"
}
@@ -57,63 +57,24 @@ resource "helm_release" "cluster_autoscaler" {
]
}
-module "iam_assumable_role_cluster_autoscaler" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "~> 4.0"
+module "cluster_autoscaler_irsa" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+ version = "~> 4.12"
- create_role = true
role_name_prefix = "cluster-autoscaler"
role_description = "IRSA role for cluster autoscaler"
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:cluster-autoscaler-aws"]
- oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+ attach_cluster_autoscaler_policy = true
+ cluster_autoscaler_cluster_ids = [module.eks.cluster_id]
- tags = local.tags
-}
-
-resource "aws_iam_policy" "cluster_autoscaler" {
- name = "KarpenterControllerPolicy-refresh"
- policy = data.aws_iam_policy_document.cluster_autoscaler.json
-
- tags = local.tags
-}
-
-data "aws_iam_policy_document" "cluster_autoscaler" {
- statement {
- sid = "clusterAutoscalerAll"
- actions = [
- "autoscaling:DescribeAutoScalingGroups",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeLaunchConfigurations",
- "autoscaling:DescribeTags",
- "ec2:DescribeLaunchTemplateVersions",
- ]
- resources = ["*"]
- }
-
- statement {
- sid = "clusterAutoscalerOwn"
- actions = [
- "autoscaling:SetDesiredCapacity",
- "autoscaling:TerminateInstanceInAutoScalingGroup",
- "autoscaling:UpdateAutoScalingGroup",
- ]
- resources = ["*"]
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
- values = ["owned"]
- }
-
- condition {
- test = "StringEquals"
- variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
- values = ["true"]
+ oidc_providers = {
+ main = {
+ provider_arn = module.eks.oidc_provider_arn
+ namespace_service_accounts = ["kube-system:cluster-autoscaler-aws"]
}
}
+
+ tags = local.tags
}
################################################################################
@@ -142,7 +103,7 @@ resource "helm_release" "aws_node_termination_handler" {
set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
- value = module.aws_node_termination_handler_role.iam_role_arn
+ value = module.node_termination_handler_irsa.iam_role_arn
type = "string"
}
@@ -172,51 +133,24 @@ resource "helm_release" "aws_node_termination_handler" {
]
}
-module "aws_node_termination_handler_role" {
- source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
- version = "~> 4.0"
+module "node_termination_handler_irsa" {
+ source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+ version = "~> 4.12"
- create_role = true
role_name_prefix = "node-termination-handler"
role_description = "IRSA role for node termination handler"
- provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
- role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
- oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
- oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
+ attach_node_termination_handler_policy = true
+ node_termination_handler_sqs_queue_arns = [module.aws_node_termination_handler_sqs.sqs_queue_arn]
- tags = local.tags
-}
-
-resource "aws_iam_policy" "aws_node_termination_handler" {
- name = "${local.name}-aws-node-termination-handler"
- policy = data.aws_iam_policy_document.aws_node_termination_handler.json
-
- tags = local.tags
-}
-
-data "aws_iam_policy_document" "aws_node_termination_handler" {
- statement {
- actions = [
- "ec2:DescribeInstances",
- "autoscaling:DescribeAutoScalingInstances",
- "autoscaling:DescribeTags",
- ]
- resources = ["*"]
- }
-
- statement {
- actions = ["autoscaling:CompleteLifecycleAction"]
- resources = [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
+ oidc_providers = {
+ main = {
+ provider_arn = module.eks.oidc_provider_arn
+ namespace_service_accounts = ["kube-system:aws-node-termination-handler"]
+ }
}
- statement {
- actions = [
- "sqs:DeleteMessage",
- "sqs:ReceiveMessage"
- ]
- resources = [module.aws_node_termination_handler_sqs.sqs_queue_arn]
- }
+ tags = local.tags
}
module "aws_node_termination_handler_sqs" {
diff --git a/main.tf b/main.tf
index f124c4e755..853df4ba1e 100644
--- a/main.tf
+++ b/main.tf
@@ -183,7 +183,7 @@ data "aws_iam_policy_document" "assume_role_policy" {
principals {
type = "Service"
- identifiers = ["eks.amazonaws.com"]
+ identifiers = ["eks.${data.aws_partition.current.dns_suffix}"]
}
}
}
@@ -234,10 +234,6 @@ resource "aws_eks_addon" "this" {
]
}
- # Note: if an addon needs to be provisioned ahead of a node group users will
- # need to create the addon outside of this module until a 2nd addon resource is added
- # to the module (here) that is not dependent on node groups
- # Or if addon management improves, this dependency can be removed https://github.com/aws/containers-roadmap/issues/1389
depends_on = [
module.fargate_profile,
module.eks_managed_node_group,