-
Notifications
You must be signed in to change notification settings - Fork 0
/
worker.tf
107 lines (96 loc) · 3.48 KB
/
worker.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
# Security group attached to every EKS worker node. Only the catch-all egress
# rule lives inline; worker ingress is defined in separate
# aws_security_group_rule resources below so the cluster SG can reference it
# without a dependency cycle.
resource "aws_security_group" "demo_node" {
  name        = "terraform-eks-demo-node-worker"
  description = "Security group for all nodes in the cluster"
  vpc_id      = "${module.vpc.vpc_id}"

  # Allow all outbound traffic from the nodes.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # The "kubernetes.io/cluster/<name>" = "owned" tag is required so EKS and
  # the Kubernetes cloud controller can discover resources of this cluster.
  tags = "${
    map(
      "Name", "terraform-eks-demo-node",
      "kubernetes.io/cluster/${var.cluster_name}", "owned",
    )
  }"
}
# Node-to-node traffic: members of the worker security group may talk to each
# other on any protocol and any port.
resource "aws_security_group_rule" "demo_node_ingress_self" {
  type                     = "ingress"
  description              = "Allow node to communicate with each other"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = "${aws_security_group.demo_node.id}"
  source_security_group_id = "${aws_security_group.demo_node.id}"
}
# Control-plane-to-node traffic: the EKS control plane reaches kubelets and
# pods on the workers over the high TCP port range.
resource "aws_security_group_rule" "demo_node_ingress_cluster" {
  type                     = "ingress"
  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
  protocol                 = "tcp"
  from_port                = 1025
  to_port                  = 65535
  security_group_id        = "${aws_security_group.demo_node.id}"
  source_security_group_id = "${aws_security_group.demo_cluster.id}"
}
# Node-to-control-plane traffic: pods on the workers may call the cluster API
# server over HTTPS. Attached to the cluster security group.
resource "aws_security_group_rule" "demo_cluster_ingress_node_https" {
  type                     = "ingress"
  description              = "Allow pods to communicate with the cluster API Server"
  protocol                 = "tcp"
  from_port                = 443
  to_port                  = 443
  security_group_id        = "${aws_security_group.demo_cluster.id}"
  source_security_group_id = "${aws_security_group.demo_node.id}"
}
# EKS currently documents this required userdata for EKS worker nodes to
# properly configure Kubernetes applications on the EC2 instance.
# We utilize a Terraform local here to simplify Base64 encoding this
# information into the AutoScaling Launch Configuration.
# More information: https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
# Bootstrap userdata for the worker nodes: runs the stock EKS bootstrap
# script with this cluster's endpoint, CA bundle, and name so the instance
# joins the cluster at boot. NOTE: the heredoc body is passed verbatim to the
# instance — keep its lines unindented.
locals {
  demo-node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.demo.endpoint}' --b64-cluster-ca '${aws_eks_cluster.demo.certificate_authority.0.data}' '${var.cluster_name}'
USERDATA
}
# Worker pool for the cluster, provisioned as a spot fleet by the local
# ./spot_fleet module.
module "worker_fleet" {
  source = "./spot_fleet"

  fleet_size        = "${var.fleet_size}"
  region            = "${var.region}"
  key_name          = "${var.key_name}"
  security_group_id = "${aws_security_group.demo_node.id}"
  env               = "test"
  vpc_id            = "${module.vpc.vpc_id}"

  # Bootstrap script from the demo-node-userdata local; AMI comes from the
  # data.aws_ami.eks_worker lookup (defined elsewhere in this configuration).
  userdata     = "${local.demo-node-userdata}"
  ami_id       = "${data.aws_ami.eks_worker.id}"
  service_name = "${var.cluster_name}"
  subnet_ids   = ["${module.vpc.public_subnets}"]

  # AWS-managed policies required for EKS worker nodes (node registration,
  # VPC CNI networking, and ECR image pulls).
  instance_policy_arns = [
    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
  ]

  # Cluster-ownership tag required so Kubernetes can discover these instances.
  tags = "${
    map(
      "Name", "terraform-eks-demo-node",
      "kubernetes.io/cluster/${var.cluster_name}", "owned",
    )
  }"
}
# Rendered aws-auth ConfigMap manifest. It maps the worker fleet's IAM
# instance role to Kubernetes node identities so the workers are authorized
# to join the cluster (applied with kubectl, e.g. via an output of this
# configuration).
#
# FIX: the YAML body had lost all of its indentation, which makes it an
# invalid manifest ("name"/"namespace" were not nested under "metadata", and
# the mapRoles entry was flat). Restored to the aws-auth layout documented in
# the EKS worker-node guide; all values are unchanged.
locals {
  config-map-aws-auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ${module.worker_fleet.instance_role}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
CONFIGMAPAWSAUTH
}