diff --git a/packages/@aws-cdk/aws-cloudformation/lib/custom-resource.ts b/packages/@aws-cdk/aws-cloudformation/lib/custom-resource.ts index 17974dbe72aa2..98edc567efa78 100644 --- a/packages/@aws-cdk/aws-cloudformation/lib/custom-resource.ts +++ b/packages/@aws-cdk/aws-cloudformation/lib/custom-resource.ts @@ -100,6 +100,17 @@ export class CustomResource extends Resource { this.resource.applyRemovalPolicy(props.removalPolicy, { default: RemovalPolicy.DESTROY }); } + /** + * The physical name of this custom resource. + */ + public get ref() { + return this.resource.ref; + } + + /** + * An attribute of this custom resource + * @param attributeName the attribute name + */ public getAtt(attributeName: string) { return this.resource.getAtt(attributeName); } diff --git a/packages/@aws-cdk/aws-cloudformation/test/test.resource.ts b/packages/@aws-cdk/aws-cloudformation/test/test.resource.ts index 015119e1b8416..968fc9c4cc180 100644 --- a/packages/@aws-cdk/aws-cloudformation/test/test.resource.ts +++ b/packages/@aws-cdk/aws-cloudformation/test/test.resource.ts @@ -196,9 +196,21 @@ export = testCase({ }, }, + + '.ref returns the intrinsic reference (physical name)'(test: Test) { + // GIVEN + const stack = new cdk.Stack(); + const res = new TestCustomResource(stack, 'myResource'); + + // THEN + test.deepEqual(stack.resolve(res.resource.ref), { Ref: 'myResourceC6A188A9' }); + test.done(); + } }); class TestCustomResource extends cdk.Construct { + public readonly resource: CustomResource; + constructor(scope: cdk.Construct, id: string, opts: { removalPolicy?: cdk.RemovalPolicy } = {}) { super(scope, id); @@ -210,7 +222,7 @@ class TestCustomResource extends cdk.Construct { timeout: cdk.Duration.minutes(5), }); - new CustomResource(this, 'Resource', { + this.resource = new CustomResource(this, 'Resource', { ...opts, provider: CustomResourceProvider.lambda(singletonLambda), }); diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md index 11387de922f2b..48f157e906c43 100644 --- a/packages/@aws-cdk/aws-eks/README.md +++ b/packages/@aws-cdk/aws-eks/README.md @@ -15,20 +15,176 @@ --- -This construct library allows you to define and create [Amazon Elastic Container Service for Kubernetes (EKS)](https://aws.amazon.com/eks/) clusters programmatically. +This construct library allows you to define [Amazon Elastic Container Service +for Kubernetes (EKS)](https://aws.amazon.com/eks/) clusters programmatically. -### Example +This library also supports programmatically defining Kubernetes resource +manifests within EKS clusters. -The following example shows how to start an EKS cluster and how to -add worker nodes to it: +This example defines an Amazon EKS cluster with a single pod: -[starting a cluster example](test/integ.eks-cluster.lit.ts) +```ts +const cluster = new eks.Cluster(this, 'hello-eks', { vpc }); -After deploying the previous CDK app you still need to configure `kubectl` -and manually add the nodes to your cluster, as described [in the EKS user -guide](https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html). 
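+
+// add worker nodes to the cluster (an EC2 auto-scaling group); the instance
+// type and desired capacity below are only example values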
+cluster.addCapacity('default', {
+  instanceType: new ec2.InstanceType('t2.medium'),
+  desiredCapacity: 10,
+});
 
-### SSH into your nodes
+cluster.addResource('mypod', {
+  apiVersion: 'v1',
+  kind: 'Pod',
+  metadata: { name: 'mypod' },
+  spec: {
+    containers: [
+      {
+        name: 'hello',
+        image: 'paulbouwer/hello-kubernetes:1.5',
+        ports: [ { containerPort: 8080 } ]
+      }
+    ]
+  }
+});
+```
+
+**NOTE**: in order to determine the default AMI for Amazon EKS instances, the
+`eks.Cluster` resource must be defined within a stack that is configured with an
+explicit `env.region`. See [Environments](https://docs.aws.amazon.com/cdk/latest/guide/environments.html)
+in the AWS CDK Developer Guide for more details.
+
+Here is a [complete sample](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-eks/test/integ.eks-kubectl.lit.ts).
+
+### Interacting with Your Cluster
+
+The Amazon EKS construct library allows you to specify an IAM role that will be
+granted `system:masters` privileges on your cluster.
+
+Without specifying a `mastersRole`, you will not be able to interact manually
+with the cluster.
+
+The following example defines an IAM role that can be assumed by all users
+in the account and shows how to use the `mastersRole` property to map this
+role to the Kubernetes `system:masters` group:
+
+```ts
+// first define the role
+const clusterAdmin = new iam.Role(this, 'AdminRole', {
+  assumedBy: new iam.AccountRootPrincipal()
+});
+
+// now define the cluster and map role to "masters" RBAC group
+new eks.Cluster(this, 'Cluster', {
+  vpc: vpc,
+  mastersRole: clusterAdmin
+});
+```
+
+Now, given AWS credentials for a user that is trusted by the masters role, you
+will be able to interact with your cluster like this:
+
+```console
+$ aws eks update-kubeconfig --name CLUSTER-NAME
+$ kubectl get all -n kube-system
+...
+```
+
+For your convenience, an AWS CloudFormation output will automatically be
+included in your template and will be printed when running `cdk deploy`.
+
+**NOTE**: if the cluster is configured with `kubectlEnabled: false`, it
+will be created with the role/user that created the AWS CloudFormation
+stack. See [Kubectl Support](#kubectl-support) for details.
+
+### Defining Kubernetes Resources
+
+The `KubernetesResource` construct or `cluster.addResource` method can be used
+to apply Kubernetes resource manifests to this cluster.
+
+The following examples deploy the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes)
+service on the cluster:
+
+```ts
+const appLabel = { app: "hello-kubernetes" };
+
+const deployment = {
+  apiVersion: "apps/v1",
+  kind: "Deployment",
+  metadata: { name: "hello-kubernetes" },
+  spec: {
+    replicas: 3,
+    selector: { matchLabels: appLabel },
+    template: {
+      metadata: { labels: appLabel },
+      spec: {
+        containers: [
+          {
+            name: "hello-kubernetes",
+            image: "paulbouwer/hello-kubernetes:1.5",
+            ports: [ { containerPort: 8080 } ]
+          }
+        ]
+      }
+    }
+  }
+};
+
+const service = {
+  apiVersion: "v1",
+  kind: "Service",
+  metadata: { name: "hello-kubernetes" },
+  spec: {
+    type: "LoadBalancer",
+    ports: [ { port: 80, targetPort: 8080 } ],
+    selector: appLabel
+  }
+};
+
+// option 1: use a construct
+new KubernetesResource(this, 'hello-kub', {
+  cluster,
+  manifest: [ deployment, service ]
+});
+
+// or, option 2: use `addResource`
+cluster.addResource('hello-kub', service, deployment);
+```
+
+Kubernetes resources are implemented as CloudFormation resources in the CDK. 
This means that if the resource is deleted from your code (or the stack is
+deleted), the next `cdk deploy` will issue a `kubectl delete` command and the
+Kubernetes resources will be deleted.
+
+### AWS IAM Mapping
+
+As described in the [Amazon EKS User Guide](https://docs.aws.amazon.com/en_us/eks/latest/userguide/add-user-role.html),
+you can map AWS IAM users and roles to [Kubernetes Role-based access control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac).
+
+The Amazon EKS construct manages the **aws-auth ConfigMap** Kubernetes resource
+on your behalf and exposes an API through `cluster.awsAuth` for mapping
+users, roles and accounts.
+
+Furthermore, when auto-scaling capacity is added to the cluster (through
+`cluster.addCapacity` or `cluster.addAutoScalingGroup`), the IAM instance role
+of the auto-scaling group will be automatically mapped to RBAC so nodes can
+connect to the cluster. Manual mapping is no longer required.
+
+> NOTE: `cluster.awsAuth` will throw an error if your cluster is created with `kubectlEnabled: false`.
+
+For example, let's say you want to grant an IAM user administrative privileges
+on your cluster:
+
+```ts
+const adminUser = new iam.User(this, 'Admin');
+cluster.awsAuth.addUserMapping(adminUser, { groups: [ 'system:masters' ]});
+```
+
+A convenience method for mapping a role to the `system:masters` group is also available:
+
+```ts
+cluster.awsAuth.addMastersRole(role);
+```
+
+### Node SSH Access
 
 If you want to be able to SSH into your worker nodes, you must already have an
 SSH key in the region you're connecting to and pass it, and you must
@@ -41,7 +197,69 @@ If you want to SSH into nodes in a private subnet, you should set up a
 bastion host in a public subnet. That setup is recommended, but is
 unfortunately beyond the scope of this documentation.
 
+### kubectl Support
+
+When you create an Amazon EKS cluster, the IAM entity (user or role) that
+creates the cluster, such as a [federated user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers.html),
+is automatically granted `system:masters` permissions in the cluster's RBAC
+configuration.
+
+In order to allow programmatically defining **Kubernetes resources** in your AWS
+CDK app and provisioning them through AWS CloudFormation, we will need to assume
+this "masters" role every time we want to issue `kubectl` operations against your
+cluster.
+
+At the moment, the [AWS::EKS::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-cluster.html)
+AWS CloudFormation resource does not support this behavior, so in order to
+support "programmatic kubectl", such as applying manifests
+and mapping IAM roles from within your CDK application, the Amazon EKS
+construct library uses a custom resource for provisioning the cluster.
+This custom resource is executed with an IAM role that we can then use
+to issue `kubectl` commands.
+
+The default behavior of this library is to use this custom resource in order
+to retain programmatic control over the cluster. In other words: to allow
+you to define Kubernetes resources in your CDK code instead of having to
+manage your Kubernetes applications through a separate system.
+
+One implication of this design is that, by default, the user who provisioned
+the AWS CloudFormation stack (executed `cdk deploy`) will not have
+administrative privileges on the EKS cluster. In addition:
+
+1. Additional resources will be synthesized into your template (the AWS Lambda
+   function, the role and policy).
+2. 
As described in [Interacting with Your Cluster](#interacting-with-your-cluster), + if you wish to be able to manually interact with your cluster, you will need + to map an IAM role or user to the `system:masters` group. This can be either + done by specifying a `mastersRole` when the cluster is defined, calling + `cluster.addMastersRole` or explicitly mapping an IAM role or IAM user to the + relevant Kubernetes RBAC groups using `cluster.addRoleMapping` and/or + `cluster.addUserMapping`. + +If you wish to disable the programmatic kubectl behavior and use the standard +AWS::EKS::Cluster resource, you can specify `kubectlEnabled: false` when you define +the cluster: + +```ts +new eks.Cluster(this, 'cluster', { + vpc: vpc, + kubectlEnabled: false +}); +``` + +**Take care**: a change in this property will cause the cluster to be destroyed +and a new cluster to be created. + +When kubectl is disabled, you should be aware of the following: + +1. When you log-in to your cluster, you don't need to specify `--role-arn` as long as you are using the same user that created + the cluster. +2. As described in the Amazon EKS User Guide, you will need to manually + edit the [aws-auth ConfigMap](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html) when you add capacity in order to map + the IAM instance role to RBAC to allow nodes to join the cluster. +3. Any `eks.Cluster` APIs that depend on programmatic kubectl support will fail with an error: `cluster.addResource`, `cluster.awsAuth`, `props.mastersRole`. + ### Roadmap -- [ ] Add ability to start tasks on clusters using CDK (similar to ECS's "`Service`" concept). -- [ ] Describe how to set up AutoScaling (how to combine EC2 and Kubernetes scaling) \ No newline at end of file +- [ ] AutoScaling (combine EC2 and Kubernetes scaling) +- [ ] Spot fleet support diff --git a/packages/@aws-cdk/aws-eks/lib/aws-auth-mapping.ts b/packages/@aws-cdk/aws-eks/lib/aws-auth-mapping.ts new file mode 100644 index 0000000000000..7763646a8771a --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/aws-auth-mapping.ts @@ -0,0 +1,16 @@ + +export interface Mapping { + /** + * The user name within Kubernetes to map to the IAM role. + * + * @default - By default, the user name is the ARN of the IAM role. + */ + readonly username?: string; + + /** + * A list of groups within Kubernetes to which the role is mapped. + * + * @see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings + */ + readonly groups: string[]; +} diff --git a/packages/@aws-cdk/aws-eks/lib/aws-auth.ts b/packages/@aws-cdk/aws-eks/lib/aws-auth.ts new file mode 100644 index 0000000000000..2555c05dbba55 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/aws-auth.ts @@ -0,0 +1,119 @@ +import iam = require('@aws-cdk/aws-iam'); +import { Construct, Lazy, Stack } from '@aws-cdk/core'; +import { Mapping } from './aws-auth-mapping'; +import { Cluster } from './cluster'; +import { KubernetesResource } from './k8s-resource'; + +export interface AwsAuthProps { + /** + * The EKS cluster to apply this configuration to. + * + * [disable-awslint:ref-via-interface] + */ + readonly cluster: Cluster; +} + +/** + * Manages mapping between IAM users and roles to Kubernetes RBAC configuration. 
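+ * Typically obtained through `cluster.awsAuth` rather than instantiated
+ * directly; the mappings are rendered into the `aws-auth` ConfigMap in the
+ * `kube-system` namespace.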
+ * + * @see https://docs.aws.amazon.com/en_us/eks/latest/userguide/add-user-role.html + */ +export class AwsAuth extends Construct { + private readonly stack: Stack; + private readonly roleMappings = new Array<{ role: iam.IRole, mapping: Mapping }>(); + private readonly userMappings = new Array<{ user: iam.IUser, mapping: Mapping }>(); + private readonly accounts = new Array(); + + constructor(scope: Construct, id: string, props: AwsAuthProps) { + super(scope, id); + + this.stack = Stack.of(this); + + new KubernetesResource(this, 'manifest', { + cluster: props.cluster, + manifest: [ + { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "aws-auth", + namespace: "kube-system" + }, + data: { + mapRoles: this.synthesizeMapRoles(), + mapUsers: this.synthesizeMapUsers(), + mapAccounts: this.synthesizeMapAccounts(), + } + } + ] + }); + } + + /** + * Adds the specified IAM role to the `system:masters` RBAC group, which means + * that anyone that can assume it will be able to administer this Kubernetes system. + * + * @param role The IAM role to add + * @param username Optional user (defaults to the role ARN) + */ + public addMastersRole(role: iam.IRole, username?: string) { + this.addRoleMapping(role, { + username, + groups: [ 'system:masters' ] + }); + } + + /** + * Adds a mapping between an IAM role to a Kubernetes user and groups. + * + * @param role The IAM role to map + * @param mapping Mapping to k8s user name and groups + */ + public addRoleMapping(role: iam.IRole, mapping: Mapping) { + this.roleMappings.push({ role, mapping }); + } + + /** + * Adds a mapping between an IAM user to a Kubernetes user and groups. + * + * @param user The IAM user to map + * @param mapping Mapping to k8s user name and groups + */ + public addUserMapping(user: iam.IUser, mapping: Mapping) { + this.userMappings.push({ user, mapping }); + } + + /** + * Additional AWS account to add to the aws-auth configmap. + * @param accountId account number + */ + public addAccount(accountId: string) { + this.accounts.push(accountId); + } + + private synthesizeMapRoles() { + return Lazy.anyValue({ + produce: () => this.stack.toJsonString(this.roleMappings.map(m => ({ + rolearn: m.role.roleArn, + username: m.mapping.username, + groups: m.mapping.groups + }))) + }); + } + + private synthesizeMapUsers() { + return Lazy.anyValue({ + produce: () => this.stack.toJsonString(this.userMappings.map(m => ({ + userarn: this.stack.formatArn({ service: 'iam', resource: 'user', resourceName: m.user.userName }), + username: m.mapping.username, + groups: m.mapping.groups + }))) + }); + } + + private synthesizeMapAccounts() { + return Lazy.anyValue({ + produce: () => this.stack.toJsonString(this.accounts) + }); + } +} diff --git a/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts new file mode 100644 index 0000000000000..131cc3d270cda --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts @@ -0,0 +1,83 @@ +import cfn = require('@aws-cdk/aws-cloudformation'); +import { PolicyStatement } from '@aws-cdk/aws-iam'; +import iam = require('@aws-cdk/aws-iam'); +import lambda = require('@aws-cdk/aws-lambda'); +import { Construct, Duration, Token } from '@aws-cdk/core'; +import path = require('path'); +import { CfnClusterProps } from './eks.generated'; +import { KubectlLayer } from './kubectl-layer'; + +/** + * A low-level CFN resource Amazon EKS cluster implemented through a custom + * resource. 
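+ * The resource is backed by a Python AWS Lambda handler (see
+ * `cluster-resource/index.py`) which calls the Amazon EKS API directly.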
+ *
+ * Implements EKS create/update/delete through a CloudFormation custom resource
+ * in order to allow us to control the IAM role which creates the cluster. This
+ * is required in order to allow CloudFormation to interact with the cluster via
+ * `kubectl`, enabling Kubernetes management capabilities such as applying
+ * manifests and mapping IAM users/roles to RBAC.
+ */
+export class ClusterResource extends Construct {
+  /**
+   * The AWS CloudFormation resource type used for this resource.
+   */
+  public static readonly RESOURCE_TYPE = 'Custom::AWSCDK-EKS-Cluster';
+
+  public readonly attrEndpoint: string;
+  public readonly attrArn: string;
+  public readonly attrCertificateAuthorityData: string;
+  public readonly ref: string;
+
+  /**
+   * The IAM role which created the cluster. Initially this is the only IAM role
+   * that gets administrator privileges on the cluster (`system:masters`), and
+   * will be able to issue `kubectl` commands against it.
+   */
+  public readonly creationRole: iam.IRole;
+
+  constructor(scope: Construct, id: string, props: CfnClusterProps) {
+    super(scope, id);
+
+    // each cluster resource will have its own lambda handler since permissions
+    // are scoped to this cluster and related resources like its role
+    const handler = new lambda.Function(this, 'ResourceHandler', {
+      code: lambda.Code.asset(path.join(__dirname, 'cluster-resource')),
+      runtime: lambda.Runtime.PYTHON_3_7,
+      handler: 'index.handler',
+      timeout: Duration.minutes(15),
+      memorySize: 512,
+      layers: [ KubectlLayer.getOrCreate(this) ],
+    });
+
+    if (!props.roleArn) {
+      throw new Error(`"roleArn" is required`);
+    }
+
+    // since we don't know the cluster name at this point, we must give this role
+    // wildcard ("*") resource permissions
+    handler.addToRolePolicy(new PolicyStatement({
+      actions: [ 'eks:CreateCluster', 'eks:DescribeCluster', 'eks:DeleteCluster' ],
+      resources: [ '*' ]
+    }));
+
+    // the CreateCluster API will allow the cluster to assume this role, so we
+    // need to allow the lambda execution role to pass it. 
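+    // (without "iam:PassRole" on the cluster role, the CreateCluster call made
+    // by this handler would be denied by IAM)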
+ handler.addToRolePolicy(new PolicyStatement({ + actions: [ 'iam:PassRole' ], + resources: [ props.roleArn ] + })); + + const resource = new cfn.CustomResource(this, 'Resource', { + resourceType: ClusterResource.RESOURCE_TYPE, + provider: cfn.CustomResourceProvider.lambda(handler), + properties: { + Config: props + } + }); + + this.ref = resource.ref; + this.attrEndpoint = Token.asString(resource.getAtt('Endpoint')); + this.attrArn = Token.asString(resource.getAtt('Arn')); + this.attrCertificateAuthorityData = Token.asString(resource.getAtt('CertificateAuthorityData')); + this.creationRole = handler.role!; + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/lib/cluster-resource/index.py b/packages/@aws-cdk/aws-eks/lib/cluster-resource/index.py new file mode 100644 index 0000000000000..f953c2dccfae9 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/cluster-resource/index.py @@ -0,0 +1,137 @@ +import subprocess +import os +import json +import logging +import sys + +sys.path.insert(0, '/opt/awscli') +import botocore.session +from botocore.vendored import requests + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +CFN_SUCCESS = "SUCCESS" +CFN_FAILED = "FAILED" + +# these are coming from the kubectl layer +os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + +outdir = os.environ.get('TEST_OUTDIR', '/tmp') +kubeconfig = os.path.join(outdir, 'kubeconfig') + +def handler(event, context): + + def cfn_error(message=None): + logger.error("| cfn_error: %s" % message) + cfn_send(event, context, CFN_FAILED, reason=message) + + try: + logger.info(json.dumps(event)) + + stack_id = event['StackId'] + request_id = event['RequestId'] # used to generate cluster name + request_type = event['RequestType'] + props = event['ResourceProperties'] + old_props = event.get('OldResourceProperties', {}) + physical_id = event.get('PhysicalResourceId', None) + config = props['Config'] + + logger.info(json.dumps(config)) + + session = botocore.session.get_session() + eks = session.create_client('eks'); + + # determine cluster name: the it can either be explicitly + # specified in the resource properties or brought in from + # the physical id. for "Create" operations, if the cluster + # name is not created, it is allocated from the request id + cluster_name=config.get('name', None) + if cluster_name is None: + if physical_id: cluster_name = physical_id + elif request_type == 'Create': cluster_name = "cluster-%s" % request_id + else: raise Exception("unexpected error. 
cannot determine cluster name") + config['name'] = cluster_name + logger.info("request: %s" % config) + + # delete is a special case + if request_type == 'Delete': + logger.info('deleting cluster') + eks.delete_cluster(name=cluster_name) + logger.info('waiting for cluster to be deleted...') + waiter = eks.get_waiter('cluster_deleted') + waiter.wait(name=cluster_name) + cfn_send(event, context, CFN_SUCCESS, physicalResourceId=cluster_name) + return + + if request_type == 'Create': + logger.info("creating cluster %s" % cluster_name) + resp = eks.create_cluster(**config) + logger.info("create response: %s" % resp) + elif request_type == 'Update': + logger.info("updating cluster %s" % cluster_name) + resp = eks.update_cluster_config(**config) + logger.info("update response: %s" % resp) + else: + raise Exception("Invalid request type %s" % request_type) + + # wait for the cluster to become active (13min timeout) + logger.info('waiting for cluster to become active...') + waiter = eks.get_waiter('cluster_active') + waiter.wait(name=cluster_name, WaiterConfig={ + 'Delay': 30, + 'MaxAttempts': 26 + }) + + resp = eks.describe_cluster(name=cluster_name) + logger.info("describe response: %s" % resp) + attrs = { + 'Name': resp['cluster']['name'], + 'Endpoint': resp['cluster']['endpoint'], + 'Arn': resp['cluster']['arn'], + 'CertificateAuthorityData': resp['cluster']['certificateAuthority']['data'] + } + logger.info("attributes: %s" % attrs) + cfn_send(event, context, CFN_SUCCESS, responseData=attrs, physicalResourceId=cluster_name) + + except KeyError as e: + cfn_error("invalid request. Missing '%s'" % str(e)) + except Exception as e: + logger.exception(e) + cfn_error(str(e)) + +def resp_to_attriburtes(resp): + return + + +#--------------------------------------------------------------------------------------------------- +# sends a response to cloudformation +def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None): + + responseUrl = event['ResponseURL'] + logger.info(responseUrl) + + responseBody = {} + responseBody['Status'] = responseStatus + responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name) + responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name + responseBody['StackId'] = event['StackId'] + responseBody['RequestId'] = event['RequestId'] + responseBody['LogicalResourceId'] = event['LogicalResourceId'] + responseBody['NoEcho'] = noEcho + responseBody['Data'] = responseData + + body = json.dumps(responseBody) + logger.info("| response body:\n" + body) + + headers = { + 'content-type' : '', + 'content-length' : str(len(body)) + } + + try: + response = requests.put(responseUrl, data=body, headers=headers) + logger.info("| status code: " + response.reason) + except Exception as e: + logger.error("| unable to send response to CloudFormation") + logger.exception(e) diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts index 3e516966b6976..db28cd1f5bee6 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts @@ -1,11 +1,17 @@ import autoscaling = require('@aws-cdk/aws-autoscaling'); -import ec2 = require('@aws-cdk/aws-ec2'); import { Subnet } from '@aws-cdk/aws-ec2'; +import ec2 = require('@aws-cdk/aws-ec2'); import iam = require('@aws-cdk/aws-iam'); -import { CfnOutput, Construct, IResource, Resource, Tag } from '@aws-cdk/core'; +import lambda = require('@aws-cdk/aws-lambda'); 
+import { CfnOutput, Construct, Duration, IResource, Resource, Tag } from '@aws-cdk/core';
+import path = require('path');
 import { EksOptimizedAmi, nodeTypeForInstanceType } from './ami';
-import { CfnCluster } from './eks.generated';
+import { AwsAuth } from './aws-auth';
+import { ClusterResource } from './cluster-resource';
+import { CfnCluster, CfnClusterProps } from './eks.generated';
 import { maxPodsForInstanceType } from './instance-data';
+import { KubernetesResource } from './k8s-resource';
+import { KubectlLayer } from './kubectl-layer';
 
 /**
  * An EKS cluster
@@ -69,6 +75,9 @@ export interface ClusterAttributes {
    */
   readonly clusterCertificateAuthorityData: string;
 
+  /**
+   * The security groups associated with this cluster.
+   */
   readonly securityGroups: ec2.ISecurityGroup[];
 }
 
@@ -94,37 +103,73 @@ export interface ClusterProps {
    * ]
    * ```
    *
-   * @default All public and private subnets
+   * @default - All public and private subnets
    */
   readonly vpcSubnets?: ec2.SubnetSelection[];
 
   /**
    * Role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
    *
-   * @default A role is automatically created for you
+   * @default - A role is automatically created for you
   */
   readonly role?: iam.IRole;
 
   /**
    * Name for the cluster.
    *
-   * @default Automatically generated name
+   * @default - Automatically generated name
   */
   readonly clusterName?: string;
 
   /**
    * Security Group to use for Control Plane ENIs
    *
-   * @default A security group is automatically created
+   * @default - A security group is automatically created
   */
   readonly securityGroup?: ec2.ISecurityGroup;
 
   /**
    * The Kubernetes version to run in the cluster
    *
-   * @default If not supplied, will use Amazon default version
+   * @default - If not supplied, will use Amazon default version
   */
   readonly version?: string;
+
+  /**
+   * An IAM role that will be added to the `system:masters` Kubernetes RBAC
+   * group.
+   *
+   * @see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings
+   *
+   * @default - By default, it will only be possible to update this Kubernetes
+   *            system by adding resources to this cluster via `addResource` or
+   *            by defining `KubernetesResource` resources in your AWS CDK app.
+   *            Use this if you wish to grant cluster administration privileges
+   *            to another role.
+   */
+  readonly mastersRole?: iam.IRole;
+
+  /**
+   * Allows defining `kubectl`-related resources on this cluster.
+   *
+   * If this is disabled, it will not be possible to use the following
+   * capabilities:
+   * - `addResource`
+   * - `addRoleMapping`
+   * - `addUserMapping`
+   * - `addMastersRole` and `props.mastersRole`
+   *
+   * If this is disabled, the cluster can only be managed by issuing `kubectl`
+   * commands from a session that uses the IAM role/user that created the
+   * cluster.
+   *
+   * _NOTE_: changing this value will destroy the cluster. This is because a
+   * manageable cluster must be created using an AWS CloudFormation custom
+   * resource which executes with an IAM role owned by the CDK app.
+   *
+   * @default true The cluster can be managed by the AWS CDK application.
+   */
+  readonly kubectlEnabled?: boolean;
 }
 
 /**
@@ -189,6 +234,32 @@ export class Cluster extends Resource implements ICluster {
    */
  public readonly role: iam.IRole;
 
+  /**
+   * Indicates if `kubectl`-related operations can be performed on this cluster.
+   */
+  public readonly kubectlEnabled: boolean;
+
+  /**
+   * The CloudFormation custom resource handler that can apply Kubernetes
+   * manifests to this cluster. 
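+   * This is `undefined` if the cluster was defined with `kubectlEnabled: false`.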
+ * + * @internal + */ + public readonly _k8sResourceHandler?: lambda.Function; + + /** + * The IAM role that was used to create this cluster. This role is + * automatically added by Amazon EKS to the `system:masters` RBAC group of the + * cluster. Use `addMastersRole` or `props.mastersRole` to define additional + * IAM roles as administrators. + */ + private readonly _defaultMastersRole?: iam.IRole; + + /** + * Manages the aws-auth config map. + */ + private _awsAuth?: AwsAuth; + private readonly version: string | undefined; /** @@ -230,7 +301,7 @@ export class Cluster extends Resource implements ICluster { const placements = props.vpcSubnets || [{ subnetType: ec2.SubnetType.PUBLIC }, { subnetType: ec2.SubnetType.PRIVATE }]; const subnetIds = [...new Set(Array().concat(...placements.map(s => props.vpc.selectSubnets(s).subnetIds)))]; - const resource = new CfnCluster(this, 'Resource', { + const clusterProps: CfnClusterProps = { name: this.physicalName, roleArn: this.role.roleArn, version: props.version, @@ -238,7 +309,16 @@ export class Cluster extends Resource implements ICluster { securityGroupIds: [securityGroup.securityGroupId], subnetIds } - }); + }; + + let resource; + this.kubectlEnabled = props.kubectlEnabled === undefined ? true : props.kubectlEnabled; + if (this.kubectlEnabled) { + resource = new ClusterResource(this, 'Resource', clusterProps); + this._defaultMastersRole = resource.creationRole; + } else { + resource = new CfnCluster(this, 'Resource', clusterProps); + } this.clusterName = this.getResourceNameAttribute(resource.ref); this.clusterArn = this.getResourceArnAttribute(resource.attrArn, { @@ -251,6 +331,20 @@ export class Cluster extends Resource implements ICluster { this.clusterCertificateAuthorityData = resource.attrCertificateAuthorityData; new CfnOutput(this, 'ClusterName', { value: this.clusterName }); + + // we maintain a single manifest custom resource handler per cluster since + // permissions and role are scoped. This will return `undefined` if kubectl + // is not enabled for this cluster. + this._k8sResourceHandler = this.createKubernetesResourceHandler(); + + // map the IAM role to the `system:masters` group. + if (props.mastersRole) { + if (!this.kubectlEnabled) { + throw new Error(`Cannot specify a "masters" role if kubectl is disabled`); + } + + this.awsAuth.addMastersRole(props.mastersRole); + } } /** @@ -259,7 +353,7 @@ export class Cluster extends Resource implements ICluster { * The nodes will automatically be configured with the right VPC and AMI * for the instance type and Kubernetes version. 
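+   *
+   * If kubectl is enabled, the instance role of the auto-scaling group is also
+   * mapped (by default) to the `system:bootstrappers` and `system:nodes` RBAC
+   * groups via the cluster's `aws-auth` ConfigMap; pass `mapRole: false` to
+   * opt out of this behavior.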
*/ - public addCapacity(id: string, options: AddWorkerNodesOptions): autoscaling.AutoScalingGroup { + public addCapacity(id: string, options: CapacityOptions): autoscaling.AutoScalingGroup { const asg = new autoscaling.AutoScalingGroup(this, id, { ...options, vpc: this.vpc, @@ -273,6 +367,7 @@ export class Cluster extends Resource implements ICluster { this.addAutoScalingGroup(asg, { maxPods: maxPodsForInstanceType(options.instanceType), + mapRole: options.mapRole, }); return asg; @@ -292,7 +387,7 @@ export class Cluster extends Resource implements ICluster { * @see https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html * @param autoScalingGroup [disable-awslint:ref-via-interface] */ - public addAutoScalingGroup(autoScalingGroup: autoscaling.AutoScalingGroup, options: AddAutoScalingGroupOptions) { + public addAutoScalingGroup(autoScalingGroup: autoscaling.AutoScalingGroup, options: AutoScalingGroupOptions) { // self rules autoScalingGroup.connections.allowInternally(ec2.Port.allTraffic()); @@ -326,6 +421,76 @@ export class Cluster extends Resource implements ICluster { new CfnOutput(autoScalingGroup, 'InstanceRoleARN', { value: autoScalingGroup.role.roleArn }); + + if (options.mapRole === true && !this.kubectlEnabled) { + throw new Error(`Cannot map instance IAM role to RBAC if kubectl is disabled for the cluster`); + } + + // do not attempt to map the role if `kubectl` is not enabled for this + // cluster or if `mapRole` is set to false. By default this should happen. + const mapRole = options.mapRole === undefined ? true : options.mapRole; + if (mapRole && this.kubectlEnabled) { + // see https://docs.aws.amazon.com/en_us/eks/latest/userguide/add-user-role.html + this.awsAuth.addRoleMapping(autoScalingGroup.role, { + username: 'system:node:{{EC2PrivateDNSName}}', + groups: [ + 'system:bootstrappers', + 'system:nodes' + ] + }); + } + } + + /** + * Lazily creates the AwsAuth resource, which manages AWS authentication mapping. + */ + public get awsAuth() { + if (!this.kubectlEnabled) { + throw new Error(`Cannot define aws-auth mappings if kubectl is disabled`); + } + + if (!this._awsAuth) { + this._awsAuth = new AwsAuth(this, 'AwsAuth', { cluster: this }); + } + + return this._awsAuth; + } + + /** + * Defines a Kubernetes resource in this cluster. + * + * The manifest will be applied/deleted using kubectl as needed. + * + * @param id logical id of this manifest + * @param manifest a list of Kubernetes resource specifications + * @returns a `KubernetesResource` object. + * @throws If `kubectlEnabled` is `false` + */ + public addResource(id: string, ...manifest: any[]) { + return new KubernetesResource(this, `manifest-${id}`, { cluster: this, manifest }); + } + + private createKubernetesResourceHandler() { + if (!this.kubectlEnabled) { + return undefined; + } + + return new lambda.Function(this, 'KubernetesResourceHandler', { + code: lambda.Code.asset(path.join(__dirname, 'k8s-resource')), + runtime: lambda.Runtime.PYTHON_3_7, + handler: 'index.handler', + timeout: Duration.minutes(15), + layers: [ KubectlLayer.getOrCreate(this) ], + memorySize: 256, + environment: { + CLUSTER_NAME: this.clusterName, + }, + + // NOTE: we must use the default IAM role that's mapped to "system:masters" + // as the execution role of this custom resource handler. This is the only + // way to be able to interact with the cluster after it's been created. 
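+      // (this is the ClusterResource creation role, which Amazon EKS
+      // automatically granted "system:masters" when the cluster was created)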
+ role: this._defaultMastersRole, + }); } /** @@ -351,17 +516,27 @@ export class Cluster extends Resource implements ICluster { /** * Options for adding worker nodes */ -export interface AddWorkerNodesOptions extends autoscaling.CommonAutoScalingGroupProps { +export interface CapacityOptions extends autoscaling.CommonAutoScalingGroupProps { /** * Instance type of the instances to start */ readonly instanceType: ec2.InstanceType; + + /** + * Will automatically update the aws-auth ConfigMap to map the IAM instance + * role to RBAC. + * + * This cannot be explicitly set to `true` if the cluster has kubectl disabled. + * + * @default - true if the cluster has kubectl enabled (which is the default). + */ + readonly mapRole?: boolean; } /** * Options for adding an AutoScalingGroup as capacity */ -export interface AddAutoScalingGroupOptions { +export interface AutoScalingGroupOptions { /** * How many pods to allow on this instance. * @@ -369,6 +544,16 @@ export interface AddAutoScalingGroupOptions { * the instance type less one. */ readonly maxPods: number; + + /** + * Will automatically update the aws-auth ConfigMap to map the IAM instance + * role to RBAC. + * + * This cannot be explicitly set to `true` if the cluster has kubectl disabled. + * + * @default - true if the cluster has kubectl enabled (which is the default). + */ + readonly mapRole?: boolean; } /** diff --git a/packages/@aws-cdk/aws-eks/lib/index.ts b/packages/@aws-cdk/aws-eks/lib/index.ts index d6e9e82ec6a90..424286019ba74 100644 --- a/packages/@aws-cdk/aws-eks/lib/index.ts +++ b/packages/@aws-cdk/aws-eks/lib/index.ts @@ -1,5 +1,8 @@ -export * from "./cluster"; -export * from "./ami"; +export * from './cluster'; +export * from './ami'; +export * from './aws-auth-mapping'; +export * from './k8s-resource'; +export * from './aws-auth'; // AWS::EKS CloudFormation Resources: -export * from "./eks.generated"; +export * from './eks.generated'; diff --git a/packages/@aws-cdk/aws-eks/lib/k8s-resource.ts b/packages/@aws-cdk/aws-eks/lib/k8s-resource.ts new file mode 100644 index 0000000000000..9762ae9c93fa6 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/k8s-resource.ts @@ -0,0 +1,73 @@ +import cfn = require('@aws-cdk/aws-cloudformation'); +import { Construct, Stack } from '@aws-cdk/core'; +import { Cluster } from './cluster'; + +export interface KubernetesResourceProps { + /** + * The EKS cluster to apply this configuration to. + * + * [disable-awslint:ref-via-interface] + */ + readonly cluster: Cluster; + + /** + * The resource manifest. + * + * Consists of any number of child resources. + * + * When the resource is created/updated, this manifest will be applied to the + * cluster through `kubectl apply` and when the resource or the stack is + * deleted, the manifest will be deleted through `kubectl delete`. + * + * @example + * + * { + * apiVersion: 'v1', + * kind: 'Pod', + * metadata: { name: 'mypod' }, + * spec: { + * containers: [ { name: 'hello', image: 'paulbouwer/hello-kubernetes:1.5', ports: [ { containerPort: 8080 } ] } ] + * } + * } + * + */ + readonly manifest: any[]; +} + +/** + * Represents a resource within the Kubernetes system. + * + * Alternatively, you can use `cluster.addResource(resource[, resource, ...])` + * to define resources on this cluster. + * + * Applies/deletes the resources using `kubectl` in sync with the resource. + */ +export class KubernetesResource extends Construct { + /** + * The CloudFormation reosurce type. 
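+   * This is used as the `resourceType` of the underlying custom resource.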
+ */ + public static readonly RESOURCE_TYPE = 'Custom::AWSCDK-EKS-KubernetesResource'; + + constructor(scope: Construct, id: string, props: KubernetesResourceProps) { + super(scope, id); + + const stack = Stack.of(this); + + // we maintain a single manifest custom resource handler for each cluster + const handler = props.cluster._k8sResourceHandler; + if (!handler) { + throw new Error(`Cannot define a KubernetesManifest resource on a cluster with kubectl disabled`); + } + + new cfn.CustomResource(this, 'Resource', { + provider: cfn.CustomResourceProvider.lambda(handler), + resourceType: KubernetesResource.RESOURCE_TYPE, + properties: { + // `toJsonString` enables embedding CDK tokens in the manifest and will + // render a CloudFormation-compatible JSON string (similar to + // StepFunctions, CloudWatch Dashboards etc). + Manifest: stack.toJsonString(props.manifest), + } + }); + } +} diff --git a/packages/@aws-cdk/aws-eks/lib/k8s-resource/index.py b/packages/@aws-cdk/aws-eks/lib/k8s-resource/index.py new file mode 100644 index 0000000000000..3f16e9cb5a305 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/k8s-resource/index.py @@ -0,0 +1,123 @@ +import subprocess +import os +import json +import logging +import boto3 +from uuid import uuid4 +from botocore.vendored import requests + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +# these are coming from the kubectl layer +os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH'] + +outdir = os.environ.get('TEST_OUTDIR', '/tmp') +kubeconfig = os.path.join(outdir, 'kubeconfig') + +CFN_SUCCESS = "SUCCESS" +CFN_FAILED = "FAILED" + +def handler(event, context): + + def cfn_error(message=None): + logger.error("| cfn_error: %s" % message) + cfn_send(event, context, CFN_FAILED, reason=message) + + try: + logger.info(json.dumps(event)) + + request_type = event['RequestType'] + props = event['ResourceProperties'] + old_props = event.get('OldResourceProperties', {}) + physical_id = event.get('PhysicalResourceId', None) + manifest_text = props['Manifest'] + + cluster_name = os.environ.get('CLUSTER_NAME', None) + if cluster_name is None: + cfn_error("CLUSTER_NAME is missing in environment") + return + + # "log in" to the cluster + subprocess.check_call([ 'aws', 'eks', 'update-kubeconfig', + '--name', cluster_name, + '--kubeconfig', kubeconfig + ]) + + # write resource manifests in sequence: { r1 }{ r2 }{ r3 } (this is how + # a stream of JSON objects can be included in a k8s manifest). + manifest_list = json.loads(manifest_text) + manifest_file = os.path.join(outdir, 'manifest.yaml') + with open(manifest_file, "w") as f: + f.writelines(map(lambda obj: json.dumps(obj), manifest_list)) + + logger.info("manifest written to: %s" % manifest_file) + + if request_type == 'Create' or request_type == 'Update': + kubectl('apply', manifest_file) + elif request_type == "Delete": + try: + kubectl('delete', manifest_file) + except Exception as e: + logger.info("delete error: %s" % e) + + # if we are creating a new resource, allocate a physical id for it + # otherwise, we expect physical id to be relayed by cloudformation + if request_type == 'Create': + physical_id = "%s/%s" % (cluster_name, str(uuid4())) + else: + if not physical_id: + cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type) + return + + cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id) + return + + except KeyError as e: + cfn_error("invalid request. 
Missing '%s'" % str(e)) + except Exception as e: + logger.exception(e) + cfn_error(str(e)) + +def kubectl(verb, file): + import subprocess + try: + cmnd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + output = subprocess.check_output(cmnd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + raise Exception(exc.output) + else: + logger.info(output) + + +#--------------------------------------------------------------------------------------------------- +# sends a response to cloudformation +def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None): + + responseUrl = event['ResponseURL'] + logger.info(responseUrl) + + responseBody = {} + responseBody['Status'] = responseStatus + responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name) + responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name + responseBody['StackId'] = event['StackId'] + responseBody['RequestId'] = event['RequestId'] + responseBody['LogicalResourceId'] = event['LogicalResourceId'] + responseBody['NoEcho'] = noEcho + responseBody['Data'] = responseData + + body = json.dumps(responseBody) + logger.info("| response body:\n" + body) + + headers = { + 'content-type' : '', + 'content-length' : str(len(body)) + } + + try: + response = requests.put(responseUrl, data=body, headers=headers) + logger.info("| status code: " + response.reason) + except Exception as e: + logger.error("| unable to send response to CloudFormation") + logger.exception(e) diff --git a/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts b/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts new file mode 100644 index 0000000000000..1ecebf38ee168 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/kubectl-layer.ts @@ -0,0 +1,78 @@ +import lambda = require('@aws-cdk/aws-lambda'); +import { CfnResource, Construct, Stack, Token } from '@aws-cdk/core'; +import crypto = require('crypto'); + +const KUBECTL_APP_ARN = 'arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl'; +const KUBECTL_APP_VERSION = '1.13.7'; + +export interface KubectlLayerProps { + /** + * The semantic version of the kubectl AWS Lambda Layer SAR app to use. + * + * @default '1.13.7' + */ + readonly version?: string; +} + +/** + * An AWS Lambda layer that includes kubectl and the AWS CLI. + * + * @see https://github.com/aws-samples/aws-lambda-layer-kubectl + */ +export class KubectlLayer extends Construct implements lambda.ILayerVersion { + + /** + * Gets or create a singleton instance of this construct. + */ + public static getOrCreate(scope: Construct, props: KubectlLayerProps = {}): KubectlLayer { + const stack = Stack.of(scope); + const id = 'kubectl-layer-8C2542BC-BF2B-4DFE-B765-E181FD30A9A0'; + const exists = stack.node.tryFindChild(id) as KubectlLayer; + if (exists) { + return exists; + } + + return new KubectlLayer(stack, id, props); + } + + /** + * The ARN of the AWS Lambda layer version. + */ + public readonly layerVersionArn: string; + + /** + * All runtimes are compatible. 
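+   * The layer only packages the `kubectl` and AWS CLI binaries, so it does not
+   * restrict the runtime of functions that use it.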
+ */ + public readonly compatibleRuntimes?: lambda.Runtime[] = undefined; + + constructor(scope: Construct, id: string, props: KubectlLayerProps = {}) { + super(scope, id); + + const uniqueId = crypto.createHash('md5').update(this.node.path).digest("hex"); + const version = props.version || KUBECTL_APP_VERSION; + + this.stack.templateOptions.transforms = [ 'AWS::Serverless-2016-10-31' ]; // required for AWS::Serverless + const resource = new CfnResource(this, 'Resource', { + type: 'AWS::Serverless::Application', + properties: { + Location: { + ApplicationId: KUBECTL_APP_ARN, + SemanticVersion: version + }, + Parameters: { + LayerName: `kubectl-${uniqueId}` + } + } + }); + + this.layerVersionArn = Token.asString(resource.getAtt('Outputs.LayerVersionArn')); + } + + public get stack() { + return Stack.of(this); + } + + public addPermission(_id: string, _permission: lambda.LayerVersionPermission): void { + return; + } +} diff --git a/packages/@aws-cdk/aws-eks/package.json b/packages/@aws-cdk/aws-eks/package.json index 30f31158dfceb..be56ad721715d 100644 --- a/packages/@aws-cdk/aws-eks/package.json +++ b/packages/@aws-cdk/aws-eks/package.json @@ -70,6 +70,8 @@ }, "dependencies": { "@aws-cdk/aws-autoscaling": "^1.3.0", + "@aws-cdk/aws-cloudformation": "^1.3.0", + "@aws-cdk/aws-lambda": "^1.3.0", "@aws-cdk/aws-ec2": "^1.3.0", "@aws-cdk/aws-iam": "^1.3.0", "@aws-cdk/core": "^1.3.0" @@ -77,6 +79,8 @@ "homepage": "https://github.com/aws/aws-cdk", "peerDependencies": { "@aws-cdk/aws-autoscaling": "^1.3.0", + "@aws-cdk/aws-cloudformation": "^1.3.0", + "@aws-cdk/aws-lambda": "^1.3.0", "@aws-cdk/aws-ec2": "^1.3.0", "@aws-cdk/aws-iam": "^1.3.0", "@aws-cdk/core": "^1.3.0" diff --git a/packages/@aws-cdk/aws-eks/test/MANUAL_TEST.md b/packages/@aws-cdk/aws-eks/test/MANUAL_TEST.md index 463e4c5f2a0b3..a552429343e08 100644 --- a/packages/@aws-cdk/aws-eks/test/MANUAL_TEST.md +++ b/packages/@aws-cdk/aws-eks/test/MANUAL_TEST.md @@ -50,7 +50,7 @@ ssh -L 3000::3000 ssh-box-somewhere.example.com ``` Clean the services before you stop the cluster to get rid of the load balancer -(otherwise you won't be able to delet the stack): +(otherwise you won't be able to delete the stack): ``` kubectl delete --all services diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.expected.json new file mode 100644 index 0000000000000..49df9abfb7b16 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.expected.json @@ -0,0 +1,939 @@ +{ + "Resources": { + "VPCB9E5F0B4": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "InstanceTenancy": "default", + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC" + } + ] + } + }, + "VPCPublicSubnet1SubnetB4246D30": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + } + ] + } + }, + "VPCPublicSubnet1RouteTableFEE4B781": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet1" + } + ] + } + }, + 
"VPCPublicSubnet1RouteTableAssociation0B0896DC": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VPCPublicSubnet1RouteTableFEE4B781" + }, + "SubnetId": { + "Ref": "VPCPublicSubnet1SubnetB4246D30" + } + } + }, + "VPCPublicSubnet1DefaultRoute91CEF279": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPublicSubnet1RouteTableFEE4B781" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VPCIGWB7E252D3" + } + }, + "DependsOn": [ + "VPCVPCGW99B986DC" + ] + }, + "VPCPublicSubnet1EIP6AD938E8": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "VPCPublicSubnet1NATGatewayE0556630": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "VPCPublicSubnet1EIP6AD938E8", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "VPCPublicSubnet1SubnetB4246D30" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet1" + } + ] + } + }, + "VPCPublicSubnet2Subnet74179F39": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.32.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet2" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + } + ] + } + }, + "VPCPublicSubnet2RouteTable6F1A15F1": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet2" + } + ] + } + }, + "VPCPublicSubnet2RouteTableAssociation5A808732": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VPCPublicSubnet2RouteTable6F1A15F1" + }, + "SubnetId": { + "Ref": "VPCPublicSubnet2Subnet74179F39" + } + } + }, + "VPCPublicSubnet2DefaultRouteB7481BBA": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPublicSubnet2RouteTable6F1A15F1" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VPCIGWB7E252D3" + } + }, + "DependsOn": [ + "VPCVPCGW99B986DC" + ] + }, + "VPCPublicSubnet2EIP4947BC00": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "VPCPublicSubnet2NATGateway3C070193": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "VPCPublicSubnet2EIP4947BC00", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "VPCPublicSubnet2Subnet74179F39" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet2" + } + ] + } + }, + "VPCPublicSubnet3Subnet631C5E25": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.64.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet3" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + } + ] + } + }, + "VPCPublicSubnet3RouteTable98AE0E14": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet3" + } + ] + } + }, + "VPCPublicSubnet3RouteTableAssociation427FE0C6": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": 
"VPCPublicSubnet3RouteTable98AE0E14" + }, + "SubnetId": { + "Ref": "VPCPublicSubnet3Subnet631C5E25" + } + } + }, + "VPCPublicSubnet3DefaultRouteA0D29D46": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPublicSubnet3RouteTable98AE0E14" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VPCIGWB7E252D3" + } + }, + "DependsOn": [ + "VPCVPCGW99B986DC" + ] + }, + "VPCPublicSubnet3EIPAD4BC883": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "VPCPublicSubnet3NATGatewayD3048F5C": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "VPCPublicSubnet3EIPAD4BC883", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "VPCPublicSubnet3Subnet631C5E25" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PublicSubnet3" + } + ] + } + }, + "VPCPrivateSubnet1Subnet8BCA10E0": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.96.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet1RouteTableBE8A6027": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet1" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet1RouteTableAssociation347902D1": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet1RouteTableBE8A6027" + }, + "SubnetId": { + "Ref": "VPCPrivateSubnet1Subnet8BCA10E0" + } + } + }, + "VPCPrivateSubnet1DefaultRouteAE1D6490": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet1RouteTableBE8A6027" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VPCPublicSubnet1NATGatewayE0556630" + } + } + }, + "VPCPrivateSubnet2SubnetCFCDAA7A": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.128.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet2" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet2RouteTable0A19E10E": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet2" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet2RouteTableAssociation0C73D413": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet2RouteTable0A19E10E" + }, + "SubnetId": { + "Ref": "VPCPrivateSubnet2SubnetCFCDAA7A" + } + } + }, + "VPCPrivateSubnet2DefaultRouteF4F5CFD2": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet2RouteTable0A19E10E" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + 
"Ref": "VPCPublicSubnet2NATGateway3C070193" + } + } + }, + "VPCPrivateSubnet3Subnet3EDCD457": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.160.0/19", + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet3" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet3RouteTable192186F8": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC/PrivateSubnet3" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "VPCPrivateSubnet3RouteTableAssociationC28D144E": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet3RouteTable192186F8" + }, + "SubnetId": { + "Ref": "VPCPrivateSubnet3Subnet3EDCD457" + } + } + }, + "VPCPrivateSubnet3DefaultRoute27F311AE": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VPCPrivateSubnet3RouteTable192186F8" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VPCPublicSubnet3NATGatewayD3048F5C" + } + } + }, + "VPCIGWB7E252D3": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/VPC" + } + ] + } + }, + "VPCVPCGW99B986DC": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "VPCB9E5F0B4" + }, + "InternetGatewayId": { + "Ref": "VPCIGWB7E252D3" + } + } + }, + "EKSClusterClusterRoleB72F3251": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "eks.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSClusterPolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSServicePolicy" + ] + ] + } + ] + } + }, + "EKSClusterControlPlaneSecurityGroup580AD1FE": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "EKS Control Plane Security Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "SecurityGroupIngress": [], + "VpcId": { + "Ref": "VPCB9E5F0B4" + } + } + }, + "EKSClusterControlPlaneSecurityGroupfromeksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB4244376AEF332": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from eksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "EKSClusterControlPlaneSecurityGroup580AD1FE", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "EKSClusterBA6ECF8F": { + "Type": "AWS::EKS::Cluster", + "Properties": { + "ResourcesVpcConfig": { + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + 
"EKSClusterControlPlaneSecurityGroup580AD1FE", + "GroupId" + ] + } + ], + "SubnetIds": [ + { + "Ref": "VPCPublicSubnet1SubnetB4246D30" + }, + { + "Ref": "VPCPublicSubnet2Subnet74179F39" + }, + { + "Ref": "VPCPublicSubnet3Subnet631C5E25" + }, + { + "Ref": "VPCPrivateSubnet1Subnet8BCA10E0" + }, + { + "Ref": "VPCPrivateSubnet2SubnetCFCDAA7A" + }, + { + "Ref": "VPCPrivateSubnet3Subnet3EDCD457" + } + ] + }, + "RoleArn": { + "Fn::GetAtt": [ + "EKSClusterClusterRoleB72F3251", + "Arn" + ] + } + } + }, + "EKSClusterNodesInstanceSecurityGroup460A275E": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "eks-integ-test/EKSCluster/Nodes/InstanceSecurityGroup", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "SecurityGroupIngress": [], + "Tags": [ + { + "Key": "Name", + "Value": "eks-integ-test/EKSCluster/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "EKSClusterBA6ECF8F" + } + ] + ] + }, + "Value": "owned" + } + ], + "VpcId": { + "Ref": "VPCB9E5F0B4" + } + } + }, + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42ALLTRAFFIC8DF6EC00": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "-1", + "Description": "from eksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42:ALL TRAFFIC", + "GroupId": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + } + } + }, + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterControlPlaneSecurityGroup99328DC644383C2D9E9": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from eksintegtestEKSClusterControlPlaneSecurityGroup99328DC6:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "EKSClusterControlPlaneSecurityGroup580AD1FE", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterControlPlaneSecurityGroup99328DC61025655350D985847": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from eksintegtestEKSClusterControlPlaneSecurityGroup99328DC6:1025-65535", + "FromPort": 1025, + "GroupId": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "EKSClusterControlPlaneSecurityGroup580AD1FE", + "GroupId" + ] + }, + "ToPort": 65535 + } + }, + "EKSClusterNodesInstanceRoleEE5595D6": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSWorkerNodePolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKS_CNI_Policy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" 
+ ] + ] + } + ] + } + }, + "EKSClusterNodesInstanceProfile0F2DB3B9": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "Roles": [ + { + "Ref": "EKSClusterNodesInstanceRoleEE5595D6" + } + ] + } + }, + "EKSClusterNodesLaunchConfig921F1106": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": "ami-12345", + "InstanceType": "t2.medium", + "IamInstanceProfile": { + "Ref": "EKSClusterNodesInstanceProfile0F2DB3B9" + }, + "SecurityGroups": [ + { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceSecurityGroup460A275E", + "GroupId" + ] + } + ], + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", + { + "Ref": "EKSClusterBA6ECF8F" + }, + " --use-max-pods 17" + ] + ] + } + } + }, + "DependsOn": [ + "EKSClusterNodesInstanceRoleEE5595D6" + ] + }, + "EKSClusterNodesASGC2597E34": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "MaxSize": "1", + "MinSize": "1", + "DesiredCapacity": "1", + "LaunchConfigurationName": { + "Ref": "EKSClusterNodesLaunchConfig921F1106" + }, + "Tags": [ + { + "Key": "Name", + "PropagateAtLaunch": true, + "Value": "eks-integ-test/EKSCluster/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "EKSClusterBA6ECF8F" + } + ] + ] + }, + "PropagateAtLaunch": true, + "Value": "owned" + } + ], + "VPCZoneIdentifier": [ + { + "Ref": "VPCPrivateSubnet1Subnet8BCA10E0" + }, + { + "Ref": "VPCPrivateSubnet2SubnetCFCDAA7A" + }, + { + "Ref": "VPCPrivateSubnet3Subnet3EDCD457" + } + ] + }, + "UpdatePolicy": { + "AutoScalingRollingUpdate": { + "WaitOnResourceSignals": false, + "PauseTime": "PT0S", + "SuspendProcesses": [ + "HealthCheck", + "ReplaceUnhealthy", + "AZRebalance", + "AlarmNotification", + "ScheduledActions" + ] + }, + "AutoScalingScheduledAction": { + "IgnoreUnmodifiedGroupSizeProperties": true + } + } + } + }, + "Outputs": { + "EKSClusterClusterName2B056109": { + "Value": { + "Ref": "EKSClusterBA6ECF8F" + } + }, + "EKSClusterNodesInstanceRoleARN10992C84": { + "Value": { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceRoleEE5595D6", + "Arn" + ] + } + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.ts new file mode 100644 index 0000000000000..c324ee06b927f --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.kubectl-disabled.ts @@ -0,0 +1,36 @@ +import ec2 = require('@aws-cdk/aws-ec2'); +import cdk = require('@aws-cdk/core'); +import eks = require('../lib'); + +class EksClusterStack extends cdk.Stack { + constructor(scope: cdk.App, id: string, props?: cdk.StackProps) { + super(scope, id, props); + + /// !show + const vpc = new ec2.Vpc(this, 'VPC'); + + const cluster = new eks.Cluster(this, 'EKSCluster', { + vpc, + kubectlEnabled: false + }); + + cluster.addCapacity('Nodes', { + instanceType: new ec2.InstanceType('t2.medium'), + desiredCapacity: 1, // Raise this number to add more nodes + }); + /// !hide + } +} + +const app = new cdk.App(); + +// since the EKS optimized AMI is hard-coded here based on the region, +// we need to actually pass in a specific region. 
+new EksClusterStack(app, 'eks-integ-test', { + env: { + region: process.env.CDK_INTEG_REGION || process.env.CDK_DEFAULT_REGION, + account: process.env.CDK_INTEG_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT, + } +}); + +app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.expected.json index 49df9abfb7b16..54dd085fde746 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.expected.json +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.expected.json @@ -1,4 +1,5 @@ { + "Transform": "AWS::Serverless-2016-10-31", "Resources": { "VPCB9E5F0B4": { "Type": "AWS::EC2::VPC", @@ -10,7 +11,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC" + "Value": "eks-integ-test-basic/VPC" } ] } @@ -27,7 +28,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet1" + "Value": "eks-integ-test-basic/VPC/PublicSubnet1" }, { "Key": "aws-cdk:subnet-name", @@ -49,7 +50,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet1" + "Value": "eks-integ-test-basic/VPC/PublicSubnet1" } ] } @@ -101,7 +102,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet1" + "Value": "eks-integ-test-basic/VPC/PublicSubnet1" } ] } @@ -118,7 +119,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet2" + "Value": "eks-integ-test-basic/VPC/PublicSubnet2" }, { "Key": "aws-cdk:subnet-name", @@ -140,7 +141,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet2" + "Value": "eks-integ-test-basic/VPC/PublicSubnet2" } ] } @@ -192,7 +193,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet2" + "Value": "eks-integ-test-basic/VPC/PublicSubnet2" } ] } @@ -209,7 +210,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet3" + "Value": "eks-integ-test-basic/VPC/PublicSubnet3" }, { "Key": "aws-cdk:subnet-name", @@ -231,7 +232,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet3" + "Value": "eks-integ-test-basic/VPC/PublicSubnet3" } ] } @@ -283,7 +284,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PublicSubnet3" + "Value": "eks-integ-test-basic/VPC/PublicSubnet3" } ] } @@ -300,7 +301,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet1" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet1" }, { "Key": "aws-cdk:subnet-name", @@ -326,7 +327,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet1" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet1" }, { "Key": "kubernetes.io/role/internal-elb", @@ -370,7 +371,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet2" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet2" }, { "Key": "aws-cdk:subnet-name", @@ -396,7 +397,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet2" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet2" }, { "Key": "kubernetes.io/role/internal-elb", @@ -440,7 +441,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet3" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet3" }, { "Key": "aws-cdk:subnet-name", @@ -466,7 +467,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC/PrivateSubnet3" + "Value": "eks-integ-test-basic/VPC/PrivateSubnet3" }, { "Key": "kubernetes.io/role/internal-elb", @@ -504,7 +505,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/VPC" + "Value": "eks-integ-test-basic/VPC" } ] } @@ -590,11 +591,11 @@ } 
} }, - "EKSClusterControlPlaneSecurityGroupfromeksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB4244376AEF332": { + "EKSClusterControlPlaneSecurityGroupfromeksintegtestbasicEKSClusterNodesInstanceSecurityGroup5B890E254434E08C84B": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "IpProtocol": "tcp", - "Description": "from eksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42:443", + "Description": "from eksintegtestbasicEKSClusterNodesInstanceSecurityGroup5B890E25:443", "FromPort": 443, "GroupId": { "Fn::GetAtt": [ @@ -611,51 +612,274 @@ "ToPort": 443 } }, - "EKSClusterBA6ECF8F": { - "Type": "AWS::EKS::Cluster", + "EKSClusterResourceHandlerServiceRoleFD631254": { + "Type": "AWS::IAM::Role", "Properties": { - "ResourcesVpcConfig": { - "SecurityGroupIds": [ + "AssumeRolePolicyDocument": { + "Statement": [ { - "Fn::GetAtt": [ - "EKSClusterControlPlaneSecurityGroup580AD1FE", - "GroupId" - ] + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "lambda.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } } ], - "SubnetIds": [ - { - "Ref": "VPCPublicSubnet1SubnetB4246D30" - }, - { - "Ref": "VPCPublicSubnet2Subnet74179F39" - }, - { - "Ref": "VPCPublicSubnet3Subnet631C5E25" - }, - { - "Ref": "VPCPrivateSubnet1Subnet8BCA10E0" - }, + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "EKSClusterResourceHandlerServiceRoleDefaultPolicy4D087A98": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ { - "Ref": "VPCPrivateSubnet2SubnetCFCDAA7A" + "Action": [ + "eks:CreateCluster", + "eks:DescribeCluster", + "eks:DeleteCluster" + ], + "Effect": "Allow", + "Resource": "*" }, { - "Ref": "VPCPrivateSubnet3Subnet3EDCD457" + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "EKSClusterClusterRoleB72F3251", + "Arn" + ] + } } + ], + "Version": "2012-10-17" + }, + "PolicyName": "EKSClusterResourceHandlerServiceRoleDefaultPolicy4D087A98", + "Roles": [ + { + "Ref": "EKSClusterResourceHandlerServiceRoleFD631254" + } + ] + } + }, + "EKSClusterResourceHandler31198B21": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "EKSClusterResourceHandlerCodeS3BucketCE5CFBAC" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "EKSClusterResourceHandlerCodeS3VersionKey343D88E8" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "EKSClusterResourceHandlerCodeS3VersionKey343D88E8" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "EKSClusterResourceHandlerServiceRoleFD631254", + "Arn" ] }, - "RoleArn": { + "Runtime": "python3.7", + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 512, + "Timeout": 900 + }, + "DependsOn": [ + "EKSClusterResourceHandlerServiceRoleDefaultPolicy4D087A98", + "EKSClusterResourceHandlerServiceRoleFD631254" + ] + }, + "EKSClusterE11008B6": { + "Type": "Custom::AWSCDK-EKS-Cluster", + "Properties": { + "ServiceToken": { "Fn::GetAtt": [ - "EKSClusterClusterRoleB72F3251", + "EKSClusterResourceHandler31198B21", "Arn" ] + }, + "Config": { + "roleArn": { + "Fn::GetAtt": [ + "EKSClusterClusterRoleB72F3251", + "Arn" + ] + 
}, + "resourcesVpcConfig": { + "securityGroupIds": [ + { + "Fn::GetAtt": [ + "EKSClusterControlPlaneSecurityGroup580AD1FE", + "GroupId" + ] + } + ], + "subnetIds": [ + { + "Ref": "VPCPublicSubnet1SubnetB4246D30" + }, + { + "Ref": "VPCPublicSubnet2Subnet74179F39" + }, + { + "Ref": "VPCPublicSubnet3Subnet631C5E25" + }, + { + "Ref": "VPCPrivateSubnet1Subnet8BCA10E0" + }, + { + "Ref": "VPCPrivateSubnet2SubnetCFCDAA7A" + }, + { + "Ref": "VPCPrivateSubnet3Subnet3EDCD457" + } + ] + } } - } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "EKSClusterKubernetesResourceHandler90E6DD64": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "EKSClusterKubernetesResourceHandlerCodeS3Bucket5B1A2C93" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "EKSClusterKubernetesResourceHandlerCodeS3VersionKeyB2871B51" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "EKSClusterKubernetesResourceHandlerCodeS3VersionKeyB2871B51" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "EKSClusterResourceHandlerServiceRoleFD631254", + "Arn" + ] + }, + "Runtime": "python3.7", + "Environment": { + "Variables": { + "CLUSTER_NAME": { + "Ref": "EKSClusterE11008B6" + } + } + }, + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 256, + "Timeout": 900 + }, + "DependsOn": [ + "EKSClusterResourceHandlerServiceRoleDefaultPolicy4D087A98", + "EKSClusterResourceHandlerServiceRoleFD631254" + ] }, "EKSClusterNodesInstanceSecurityGroup460A275E": { "Type": "AWS::EC2::SecurityGroup", "Properties": { - "GroupDescription": "eks-integ-test/EKSCluster/Nodes/InstanceSecurityGroup", + "GroupDescription": "eks-integ-test-basic/EKSCluster/Nodes/InstanceSecurityGroup", "SecurityGroupEgress": [ { "CidrIp": "0.0.0.0/0", @@ -667,7 +891,7 @@ "Tags": [ { "Key": "Name", - "Value": "eks-integ-test/EKSCluster/Nodes" + "Value": "eks-integ-test-basic/EKSCluster/Nodes" }, { "Key": { @@ -676,7 +900,7 @@ [ "kubernetes.io/cluster/", { - "Ref": "EKSClusterBA6ECF8F" + "Ref": "EKSClusterE11008B6" } ] ] @@ -689,11 +913,11 @@ } } }, - "EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42ALLTRAFFIC8DF6EC00": { + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestbasicEKSClusterNodesInstanceSecurityGroup5B890E25ALLTRAFFIC17050541": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "IpProtocol": "-1", - "Description": "from eksintegtestEKSClusterNodesInstanceSecurityGroup1F94DB42:ALL TRAFFIC", + "Description": "from eksintegtestbasicEKSClusterNodesInstanceSecurityGroup5B890E25:ALL TRAFFIC", "GroupId": { "Fn::GetAtt": [ "EKSClusterNodesInstanceSecurityGroup460A275E", @@ -708,11 +932,11 @@ } } }, - "EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterControlPlaneSecurityGroup99328DC644383C2D9E9": { + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestbasicEKSClusterControlPlaneSecurityGroup389B14F14436EFF5343": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "IpProtocol": "tcp", - "Description": "from eksintegtestEKSClusterControlPlaneSecurityGroup99328DC6:443", + "Description": "from eksintegtestbasicEKSClusterControlPlaneSecurityGroup389B14F1:443", "FromPort": 443, "GroupId": { "Fn::GetAtt": [ @@ -729,11 +953,11 @@ "ToPort": 443 } }, - 
"EKSClusterNodesInstanceSecurityGroupfromeksintegtestEKSClusterControlPlaneSecurityGroup99328DC61025655350D985847": { + "EKSClusterNodesInstanceSecurityGroupfromeksintegtestbasicEKSClusterControlPlaneSecurityGroup389B14F1102565535BB0D6C6D": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { "IpProtocol": "tcp", - "Description": "from eksintegtestEKSClusterControlPlaneSecurityGroup99328DC6:1025-65535", + "Description": "from eksintegtestbasicEKSClusterControlPlaneSecurityGroup389B14F1:1025-65535", "FromPort": 1025, "GroupId": { "Fn::GetAtt": [ @@ -848,7 +1072,7 @@ [ "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", { - "Ref": "EKSClusterBA6ECF8F" + "Ref": "EKSClusterE11008B6" }, " --use-max-pods 17" ] @@ -873,7 +1097,7 @@ { "Key": "Name", "PropagateAtLaunch": true, - "Value": "eks-integ-test/EKSCluster/Nodes" + "Value": "eks-integ-test-basic/EKSCluster/Nodes" }, { "Key": { @@ -882,7 +1106,7 @@ [ "kubernetes.io/cluster/", { - "Ref": "EKSClusterBA6ECF8F" + "Ref": "EKSClusterE11008B6" } ] ] @@ -919,12 +1143,78 @@ "IgnoreUnmodifiedGroupSizeProperties": true } } + }, + "EKSClusterAwsAuthmanifestA4E0796C": { + "Type": "Custom::AWSCDK-EKS-KubernetesResource", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "EKSClusterKubernetesResourceHandler90E6DD64", + "Arn" + ] + }, + "Manifest": { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "EKSClusterNodesInstanceRoleEE5595D6", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" + ] + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA": { + "Type": "AWS::Serverless::Application", + "Properties": { + "Location": { + "ApplicationId": "arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl", + "SemanticVersion": "1.13.7" + }, + "Parameters": { + "LayerName": "kubectl-de6ff3f9a59243920be5aeee7fc888a7" + } + } + } + }, + "Parameters": { + "EKSClusterResourceHandlerCodeS3BucketCE5CFBAC": { + "Type": "String", + "Description": "S3 bucket for asset \"eks-integ-test-basic/EKSCluster/Resource/ResourceHandler/Code\"" + }, + "EKSClusterResourceHandlerCodeS3VersionKey343D88E8": { + "Type": "String", + "Description": "S3 key for asset version \"eks-integ-test-basic/EKSCluster/Resource/ResourceHandler/Code\"" + }, + "EKSClusterResourceHandlerCodeArtifactHashAECCA27B": { + "Type": "String", + "Description": "Artifact hash for asset \"eks-integ-test-basic/EKSCluster/Resource/ResourceHandler/Code\"" + }, + "EKSClusterKubernetesResourceHandlerCodeS3Bucket5B1A2C93": { + "Type": "String", + "Description": "S3 bucket for asset \"eks-integ-test-basic/EKSCluster/KubernetesResourceHandler/Code\"" + }, + "EKSClusterKubernetesResourceHandlerCodeS3VersionKeyB2871B51": { + "Type": "String", + "Description": "S3 key for asset version \"eks-integ-test-basic/EKSCluster/KubernetesResourceHandler/Code\"" + }, + "EKSClusterKubernetesResourceHandlerCodeArtifactHashA13FF2C6": { + "Type": "String", + "Description": "Artifact hash for asset \"eks-integ-test-basic/EKSCluster/KubernetesResourceHandler/Code\"" } }, "Outputs": { "EKSClusterClusterName2B056109": { "Value": { - "Ref": "EKSClusterBA6ECF8F" + "Ref": "EKSClusterE11008B6" } }, 
"EKSClusterNodesInstanceRoleARN10992C84": { diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.ts index ef7f3baa2fe4c..aafc63398d573 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.ts +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.lit.ts @@ -10,7 +10,7 @@ class EksClusterStack extends cdk.Stack { const vpc = new ec2.Vpc(this, 'VPC'); const cluster = new eks.Cluster(this, 'EKSCluster', { - vpc + vpc, }); cluster.addCapacity('Nodes', { @@ -25,7 +25,7 @@ const app = new cdk.App(); // since the EKS optimized AMI is hard-coded here based on the region, // we need to actually pass in a specific region. -new EksClusterStack(app, 'eks-integ-test', { +new EksClusterStack(app, 'eks-integ-test-basic', { env: { region: process.env.CDK_INTEG_REGION || process.env.CDK_DEFAULT_REGION, account: process.env.CDK_INTEG_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT, diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.expected.json new file mode 100644 index 0000000000000..91782b6b4bbc5 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.expected.json @@ -0,0 +1,1156 @@ +[ + { + "Resources": { + "vpcA2121C38": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "InstanceTenancy": "default", + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc" + } + ] + } + }, + "vpcPublicSubnet1Subnet2E65531E": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + } + ] + } + }, + "vpcPublicSubnet1RouteTable48A2DF9B": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + } + ] + } + }, + "vpcPublicSubnet1RouteTableAssociation5D3F4579": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet1RouteTable48A2DF9B" + }, + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + } + } + }, + "vpcPublicSubnet1DefaultRoute10708846": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet1RouteTable48A2DF9B" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + }, + "DependsOn": [ + "vpcVPCGW7984C166" + ] + }, + "vpcPublicSubnet1EIPDA49DCBE": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "vpcPublicSubnet1NATGateway9C16659E": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "vpcPublicSubnet1EIPDA49DCBE", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet1" + } + ] + } + }, + "vpcPublicSubnet2Subnet009B674F": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.64.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + }, + { + "Key": 
"aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + } + ] + } + }, + "vpcPublicSubnet2RouteTableEB40D4CB": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + } + ] + } + }, + "vpcPublicSubnet2RouteTableAssociation21F81B59": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet2RouteTableEB40D4CB" + }, + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + } + } + }, + "vpcPublicSubnet2DefaultRouteA1EC0F60": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPublicSubnet2RouteTableEB40D4CB" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + }, + "DependsOn": [ + "vpcVPCGW7984C166" + ] + }, + "vpcPublicSubnet2EIP9B3743B1": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "vpcPublicSubnet2NATGateway9B8AE11A": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "vpcPublicSubnet2EIP9B3743B1", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PublicSubnet2" + } + ] + } + }, + "vpcPrivateSubnet1Subnet934893E8": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.128.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet1" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet1RouteTableB41A48CC": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet1" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet1RouteTableAssociation67945127": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet1RouteTableB41A48CC" + }, + "SubnetId": { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + } + } + }, + "vpcPrivateSubnet1DefaultRoute1AA8E2E5": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet1RouteTableB41A48CC" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "vpcPublicSubnet1NATGateway9C16659E" + } + } + }, + "vpcPrivateSubnet2Subnet7031C2BA": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.192.0/18", + "VpcId": { + "Ref": "vpcA2121C38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet2" + }, + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + "vpcPrivateSubnet2RouteTable7280F23E": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc/PrivateSubnet2" + }, + { + "Key": "kubernetes.io/role/internal-elb", + "Value": "1" + } + ] + } + }, + 
"vpcPrivateSubnet2RouteTableAssociation007E94D3": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet2RouteTable7280F23E" + }, + "SubnetId": { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + } + } + }, + "vpcPrivateSubnet2DefaultRouteB0E07F99": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "vpcPrivateSubnet2RouteTable7280F23E" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "vpcPublicSubnet2NATGateway9B8AE11A" + } + } + }, + "vpcIGWE57CBDCA": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": "k8s-vpc/vpc" + } + ] + } + }, + "vpcVPCGW7984C166": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "vpcA2121C38" + }, + "InternetGatewayId": { + "Ref": "vpcIGWE57CBDCA" + } + } + } + }, + "Outputs": { + "ExportsOutputRefvpcA2121C384D1B3CDE": { + "Value": { + "Ref": "vpcA2121C38" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + }, + "ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041": { + "Value": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041" + } + }, + "ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242": { + "Value": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242" + } + }, + "ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271": { + "Value": { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + } + }, + "ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE": { + "Value": { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + "Export": { + "Name": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + } + } + }, + { + "Transform": "AWS::Serverless-2016-10-31", + "Resources": { + "cluster22ClusterRole5FC933B4": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "eks.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSClusterPolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSServicePolicy" + ] + ] + } + ] + } + }, + "cluster22ControlPlaneSecurityGroup2648B9CD": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "EKS Control Plane Security Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "SecurityGroupIngress": [], + "VpcId": { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + } + }, + "cluster22ControlPlaneSecurityGroupfromk8sclustercluster22NodesInstanceSecurityGroupF903AE86443C3EDA943": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from k8sclustercluster22NodesInstanceSecurityGroupF903AE86:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ 
+ "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "cluster22ResourceHandlerServiceRoleC2E4F327": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "lambda.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "eks:CreateCluster", + "eks:DescribeCluster", + "eks:DeleteCluster" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "cluster22ClusterRole5FC933B4", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "Roles": [ + { + "Ref": "cluster22ResourceHandlerServiceRoleC2E4F327" + } + ] + } + }, + "cluster22ResourceHandler6227579A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "cluster22ResourceHandlerCodeS3Bucket9D2D8D69" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "cluster22ResourceHandlerCodeS3VersionKeyB44B9CDC" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "cluster22ResourceHandlerCodeS3VersionKeyB44B9CDC" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "cluster22ResourceHandlerServiceRoleC2E4F327", + "Arn" + ] + }, + "Runtime": "python3.7", + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 512, + "Timeout": 900 + }, + "DependsOn": [ + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "cluster22ResourceHandlerServiceRoleC2E4F327" + ] + }, + "cluster227BD1CB20": { + "Type": "Custom::AWSCDK-EKS-Cluster", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22ResourceHandler6227579A", + "Arn" + ] + }, + "Config": { + "roleArn": { + "Fn::GetAtt": [ + "cluster22ClusterRole5FC933B4", + "Arn" + ] + }, + "resourcesVpcConfig": { + "securityGroupIds": [ + { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + } + ], + "subnetIds": [ + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPublicSubnet1Subnet2E65531ECCB85041" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPublicSubnet2Subnet009B674FB900C242" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + ] + } + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "cluster22KubernetesResourceHandler599F07E6": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "cluster22KubernetesResourceHandlerCodeS3Bucket73E6A860" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "cluster22KubernetesResourceHandlerCodeS3VersionKey7C559451" + } + ] + } + ] + }, 
+ { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "cluster22KubernetesResourceHandlerCodeS3VersionKey7C559451" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "cluster22ResourceHandlerServiceRoleC2E4F327", + "Arn" + ] + }, + "Runtime": "python3.7", + "Environment": { + "Variables": { + "CLUSTER_NAME": { + "Ref": "cluster227BD1CB20" + } + } + }, + "Layers": [ + { + "Fn::GetAtt": [ + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA", + "Outputs.LayerVersionArn" + ] + } + ], + "MemorySize": 256, + "Timeout": 900 + }, + "DependsOn": [ + "cluster22ResourceHandlerServiceRoleDefaultPolicy1D33C3AC", + "cluster22ResourceHandlerServiceRoleC2E4F327" + ] + }, + "cluster22AwsAuthmanifest4685C84D": { + "Type": "Custom::AWSCDK-EKS-KubernetesResource", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22KubernetesResourceHandler599F07E6", + "Arn" + ] + }, + "Manifest": { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "AdminRole38563C57", + "Arn" + ] + }, + "\\\",\\\"groups\\\":[\\\"system:masters\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "cluster22NodesInstanceRole51CD052F", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" + ] + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "cluster22NodesInstanceSecurityGroup4A3CDC24": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "k8s-cluster/cluster22/Nodes/InstanceSecurityGroup", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "SecurityGroupIngress": [], + "Tags": [ + { + "Key": "Name", + "Value": "k8s-cluster/cluster22/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "cluster227BD1CB20" + } + ] + ] + }, + "Value": "owned" + } + ], + "VpcId": { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcA2121C384D1B3CDE" + } + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22NodesInstanceSecurityGroupF903AE86ALLTRAFFIC774C7781": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "-1", + "Description": "from k8sclustercluster22NodesInstanceSecurityGroupF903AE86:ALL TRAFFIC", + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + } + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22ControlPlaneSecurityGroup3B5F21B44434A6E344D": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from k8sclustercluster22ControlPlaneSecurityGroup3B5F21B4:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "cluster22NodesInstanceSecurityGroupfromk8sclustercluster22ControlPlaneSecurityGroup3B5F21B41025655355658FCAA": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + 
"IpProtocol": "tcp", + "Description": "from k8sclustercluster22ControlPlaneSecurityGroup3B5F21B4:1025-65535", + "FromPort": 1025, + "GroupId": { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "cluster22ControlPlaneSecurityGroup2648B9CD", + "GroupId" + ] + }, + "ToPort": 65535 + } + }, + "cluster22NodesInstanceRole51CD052F": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSWorkerNodePolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKS_CNI_Policy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ] + } + ] + } + }, + "cluster22NodesInstanceProfile3D4963ED": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "Roles": [ + { + "Ref": "cluster22NodesInstanceRole51CD052F" + } + ] + } + }, + "cluster22NodesLaunchConfig184BF3BA": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": "ami-12345", + "InstanceType": "t2.medium", + "IamInstanceProfile": { + "Ref": "cluster22NodesInstanceProfile3D4963ED" + }, + "SecurityGroups": [ + { + "Fn::GetAtt": [ + "cluster22NodesInstanceSecurityGroup4A3CDC24", + "GroupId" + ] + } + ], + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", + { + "Ref": "cluster227BD1CB20" + }, + " --use-max-pods 17" + ] + ] + } + } + }, + "DependsOn": [ + "cluster22NodesInstanceRole51CD052F" + ] + }, + "cluster22NodesASGC0A97398": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "MaxSize": "3", + "MinSize": "1", + "DesiredCapacity": "3", + "LaunchConfigurationName": { + "Ref": "cluster22NodesLaunchConfig184BF3BA" + }, + "Tags": [ + { + "Key": "Name", + "PropagateAtLaunch": true, + "Value": "k8s-cluster/cluster22/Nodes" + }, + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "cluster227BD1CB20" + } + ] + ] + }, + "PropagateAtLaunch": true, + "Value": "owned" + } + ], + "VPCZoneIdentifier": [ + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet1Subnet934893E8236E2271" + }, + { + "Fn::ImportValue": "k8s-vpc:ExportsOutputRefvpcPrivateSubnet2Subnet7031C2BA60DCB1EE" + } + ] + }, + "UpdatePolicy": { + "AutoScalingRollingUpdate": { + "WaitOnResourceSignals": false, + "PauseTime": "PT0S", + "SuspendProcesses": [ + "HealthCheck", + "ReplaceUnhealthy", + "AZRebalance", + "AlarmNotification", + "ScheduledActions" + ] + }, + "AutoScalingScheduledAction": { + "IgnoreUnmodifiedGroupSizeProperties": true + } + } + }, + "cluster22manifesthellokubernetes849F52EA": { + "Type": "Custom::AWSCDK-EKS-KubernetesResource", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "cluster22KubernetesResourceHandler599F07E6", + "Arn" + ] + }, + "Manifest": 
"[{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"name\":\"hello-kubernetes\"},\"spec\":{\"type\":\"LoadBalancer\",\"ports\":[{\"port\":80,\"targetPort\":8080}],\"selector\":{\"app\":\"hello-kubernetes\"}}},{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"name\":\"hello-kubernetes\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"hello-kubernetes\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"hello-kubernetes\"}},\"spec\":{\"containers\":[{\"name\":\"hello-kubernetes\",\"image\":\"paulbouwer/hello-kubernetes:1.5\",\"ports\":[{\"containerPort\":8080}]}]}}}}]" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "kubectllayer8C2542BCBF2B4DFEB765E181FD30A9A0617C4ADA": { + "Type": "AWS::Serverless::Application", + "Properties": { + "Location": { + "ApplicationId": "arn:aws:serverlessrepo:us-east-1:903779448426:applications/lambda-layer-kubectl", + "SemanticVersion": "1.13.7" + }, + "Parameters": { + "LayerName": "kubectl-bedb92f2e70f45155fba70d3425dd148" + } + } + }, + "AdminRole38563C57": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::12345678:root" + ] + ] + } + } + } + ], + "Version": "2012-10-17" + } + } + } + }, + "Parameters": { + "cluster22ResourceHandlerCodeS3Bucket9D2D8D69": { + "Type": "String", + "Description": "S3 bucket for asset \"k8s-cluster/cluster22/Resource/ResourceHandler/Code\"" + }, + "cluster22ResourceHandlerCodeS3VersionKeyB44B9CDC": { + "Type": "String", + "Description": "S3 key for asset version \"k8s-cluster/cluster22/Resource/ResourceHandler/Code\"" + }, + "cluster22ResourceHandlerCodeArtifactHash26F3BC3F": { + "Type": "String", + "Description": "Artifact hash for asset \"k8s-cluster/cluster22/Resource/ResourceHandler/Code\"" + }, + "cluster22KubernetesResourceHandlerCodeS3Bucket73E6A860": { + "Type": "String", + "Description": "S3 bucket for asset \"k8s-cluster/cluster22/KubernetesResourceHandler/Code\"" + }, + "cluster22KubernetesResourceHandlerCodeS3VersionKey7C559451": { + "Type": "String", + "Description": "S3 key for asset version \"k8s-cluster/cluster22/KubernetesResourceHandler/Code\"" + }, + "cluster22KubernetesResourceHandlerCodeArtifactHash8138542A": { + "Type": "String", + "Description": "Artifact hash for asset \"k8s-cluster/cluster22/KubernetesResourceHandler/Code\"" + } + }, + "Outputs": { + "cluster22ClusterNameC4896469": { + "Value": { + "Ref": "cluster227BD1CB20" + } + }, + "cluster22NodesInstanceRoleARNEE60B01D": { + "Value": { + "Fn::GetAtt": [ + "cluster22NodesInstanceRole51CD052F", + "Arn" + ] + } + } + } + } +] \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.ts new file mode 100644 index 0000000000000..b02368ec43e3e --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-kubectl.lit.ts @@ -0,0 +1,100 @@ +/// !cdk-integ * + +import ec2 = require('@aws-cdk/aws-ec2'); +import iam = require('@aws-cdk/aws-iam'); +import { App, Construct, Stack, StackProps } from '@aws-cdk/core'; +import { Cluster } from '../lib'; + +// we must specify an explicit environment because we have an AMI map that is +// keyed from the target region. 
+const env = { + region: process.env.CDK_INTEG_REGION || process.env.CDK_DEFAULT_REGION, + account: process.env.CDK_INTEG_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT +}; + +class DevelopmentStack extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id, { env }); + } +} + +class VpcStack extends DevelopmentStack { + public readonly vpc: ec2.Vpc; + + constructor(scope: Construct, id: string) { + super(scope, id); + this.vpc = new ec2.Vpc(this, 'vpc', { maxAzs: 2 }); + } +} + +class ClusterStack extends DevelopmentStack { + public readonly cluster: Cluster; + public readonly instanceRoleExportName: string; + + constructor(scope: Construct, id: string, props: StackProps & { vpc: ec2.Vpc }) { + super(scope, id); + + /// !show + // define the cluster. kubectl is enabled by default. + this.cluster = new Cluster(this, 'cluster22', { + vpc: props.vpc, + }); + + // define an IAM role assumable by anyone in the account and map it to the k8s + // `system:masters` group this is required if you want to be able to issue + // manual `kubectl` commands against the cluster. + const mastersRole = new iam.Role(this, 'AdminRole', { assumedBy: new iam.AccountRootPrincipal() }); + this.cluster.awsAuth.addMastersRole(mastersRole); + + // add some capacity to the cluster. The IAM instance role will + // automatically be mapped via aws-auth to allow nodes to join the cluster. + this.cluster.addCapacity('Nodes', { + instanceType: new ec2.InstanceType('t2.medium'), + desiredCapacity: 3, + }); + + // add an arbitrary k8s manifest to the cluster. This will `kubectl apply` + // these resources upon creation or `kubectl delete` upon removal. + this.cluster.addResource('hello-kubernetes', + { + apiVersion: "v1", + kind: "Service", + metadata: { name: "hello-kubernetes" }, + spec: { + type: "LoadBalancer", + ports: [ { port: 80, targetPort: 8080 } ], + selector: { app: "hello-kubernetes" } + } + }, + { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { name: "hello-kubernetes" }, + spec: { + replicas: 1, + selector: { matchLabels: { app: "hello-kubernetes" } }, + template: { + metadata: { + labels: { app: "hello-kubernetes" } + }, + spec: { + containers: [ + { + name: "hello-kubernetes", + image: "paulbouwer/hello-kubernetes:1.5", + ports: [ { containerPort: 8080 } ] + } + ] + } + } + } + } + ); + /// !hide + } +} + +const app = new App(); +const vpcStack = new VpcStack(app, 'k8s-vpc'); +new ClusterStack(app, 'k8s-cluster', { vpc: vpcStack.vpc }); +app.synth(); diff --git a/packages/@aws-cdk/aws-eks/test/test.awsauth.ts b/packages/@aws-cdk/aws-eks/test/test.awsauth.ts new file mode 100644 index 0000000000000..4df334158f87f --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/test.awsauth.ts @@ -0,0 +1,102 @@ +import { countResources, expect, haveResource } from '@aws-cdk/assert'; +import ec2 = require('@aws-cdk/aws-ec2'); +import iam = require('@aws-cdk/aws-iam'); +import { Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { Cluster, KubernetesResource } from '../lib'; +import { AwsAuth } from '../lib/aws-auth'; +import { testFixture } from './util'; + +// tslint:disable:max-line-length + +export = { + 'empty aws-auth'(test: Test) { + // GIVEN + const stack = new Stack(); + const vpc = new ec2.Vpc(stack, 'vpc'); + const cluster = new Cluster(stack, 'cluster', { vpc }); + + // WHEN + new AwsAuth(stack, 'AwsAuth', { cluster }); + + // THEN + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: JSON.stringify([{ + apiVersion: 'v1', + kind: 
'ConfigMap', + metadata: { name: 'aws-auth', namespace: 'kube-system' }, + data: { mapRoles: '[]', mapUsers: '[]', mapAccounts: '[]' } + }]) + })); + test.done(); + }, + + 'addRoleMapping and addUserMapping can be used to define the aws-auth ConfigMap'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const cluster = new Cluster(stack, 'Cluster', { vpc }); + const role = new iam.Role(stack, 'role', { assumedBy: new iam.AnyPrincipal() }); + const user = new iam.User(stack, 'user'); + + // WHEN + cluster.awsAuth.addRoleMapping(role, { groups: [ 'role-group1' ], username: 'roleuser' }); + cluster.awsAuth.addRoleMapping(role, { groups: [ 'role-group2', 'role-group3' ] }); + cluster.awsAuth.addUserMapping(user, { groups: [ 'user-group1', 'user-group2' ] }); + cluster.awsAuth.addUserMapping(user, { groups: [ 'user-group1', 'user-group2' ], username: 'foo' }); + cluster.awsAuth.addAccount('112233'); + cluster.awsAuth.addAccount('5566776655'); + + // THEN + expect(stack).to(countResources(KubernetesResource.RESOURCE_TYPE, 1)); + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "roleC7B7E775", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"roleuser\\\",\\\"groups\\\":[\\\"role-group1\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "roleC7B7E775", + "Arn" + ] + }, + "\\\",\\\"groups\\\":[\\\"role-group2\\\",\\\"role-group3\\\"]}]\",\"mapUsers\":\"[{\\\"userarn\\\":\\\"arn:", + { + Ref: "AWS::Partition" + }, + ":iam:us-east-1:", + { + Ref: "AWS::AccountId" + }, + ":user/", + { + Ref: "user2C2B57AE" + }, + "\\\",\\\"groups\\\":[\\\"user-group1\\\",\\\"user-group2\\\"]},{\\\"userarn\\\":\\\"arn:", + { + Ref: "AWS::Partition" + }, + ":iam:us-east-1:", + { + Ref: "AWS::AccountId" + }, + ":user/", + { + Ref: "user2C2B57AE" + }, + "\\\",\\\"username\\\":\\\"foo\\\",\\\"groups\\\":[\\\"user-group1\\\",\\\"user-group2\\\"]}]\",\"mapAccounts\":\"[\\\"112233\\\",\\\"5566776655\\\"]\"}}]" + ] + ] + } + })); + test.done(); + }, + +}; diff --git a/packages/@aws-cdk/aws-eks/test/test.cluster.ts b/packages/@aws-cdk/aws-eks/test/test.cluster.ts index eaff4340b822d..61fbe0068843c 100644 --- a/packages/@aws-cdk/aws-eks/test/test.cluster.ts +++ b/packages/@aws-cdk/aws-eks/test/test.cluster.ts @@ -1,9 +1,14 @@ -import { expect, haveResource, haveResourceLike } from '@aws-cdk/assert'; +import { expect, haveResource, haveResourceLike, not } from '@aws-cdk/assert'; import ec2 = require('@aws-cdk/aws-ec2'); +import iam = require('@aws-cdk/aws-iam'); import cdk = require('@aws-cdk/core'); import { CfnOutput } from '@aws-cdk/core'; import { Test } from 'nodeunit'; import eks = require('../lib'); +import { KubernetesResource } from '../lib'; +import { testFixture } from './util'; + +// tslint:disable:max-line-length export = { 'a default cluster spans all subnets'(test: Test) { @@ -11,7 +16,7 @@ export = { const { stack, vpc } = testFixture(); // WHEN - new eks.Cluster(stack, 'Cluster', { vpc }); + new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); // THEN expect(stack).to(haveResourceLike('AWS::EKS::Cluster', { @@ -33,7 +38,7 @@ export = { const { stack, vpc } = testFixture(); // WHEN - new eks.Cluster(stack, 'Cluster', { vpc }); + new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); // THEN 
expect(stack).to(haveResource('AWS::EC2::Subnet', { @@ -51,7 +56,7 @@ export = { 'adding capacity creates an ASG with tags'(test: Test) { // GIVEN const { stack, vpc } = testFixture(); - const cluster = new eks.Cluster(stack, 'Cluster', { vpc }); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); // WHEN cluster.addCapacity('Default', { @@ -80,7 +85,7 @@ export = { 'adding capacity correctly deduces maxPods and adds userdata'(test: Test) { // GIVEN const { stack, vpc } = testFixture(); - const cluster = new eks.Cluster(stack, 'Cluster', { vpc }); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); // WHEN cluster.addCapacity('Default', { @@ -110,7 +115,7 @@ export = { // GIVEN const { stack: stack1, vpc, app } = testFixture(); const stack2 = new cdk.Stack(app, 'stack2', { env: { region: 'us-east-1' } }); - const cluster = new eks.Cluster(stack1, 'Cluster', { vpc }); + const cluster = new eks.Cluster(stack1, 'Cluster', { vpc, kubectlEnabled: false }); // WHEN const imported = eks.Cluster.fromClusterAttributes(stack2, 'Imported', { @@ -137,12 +142,131 @@ export = { }); test.done(); }, -}; -function testFixture() { - const app = new cdk.App(); - const stack = new cdk.Stack(app, 'Stack', { env: { region: 'us-east-1' }}); - const vpc = new ec2.Vpc(stack, 'VPC'); + 'disabled features when kubectl is disabled'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); + + test.throws(() => cluster.awsAuth, /Cannot define aws-auth mappings if kubectl is disabled/); + test.throws(() => cluster.addResource('foo', {}), /Cannot define a KubernetesManifest resource on a cluster with kubectl disabled/); + test.throws(() => cluster.addCapacity('boo', { instanceType: new ec2.InstanceType('r5d.24xlarge'), mapRole: true }), + /Cannot map instance IAM role to RBAC if kubectl is disabled for the cluster/); + test.done(); + }, + + 'mastersRole can be used to map an IAM role to "system:masters" (required kubectl)'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const role = new iam.Role(stack, 'role', { assumedBy: new iam.AnyPrincipal() }); + + // WHEN + new eks.Cluster(stack, 'Cluster', { vpc, mastersRole: role }); + + // THEN + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "roleC7B7E775", + "Arn" + ] + }, + "\\\",\\\"groups\\\":[\\\"system:masters\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" + ] + ] + } + })); + + test.done(); + }, + + 'addResource can be used to apply k8s manifests on this cluster'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc }); + + // WHEN + cluster.addResource('manifest1', { foo: 123 }); + cluster.addResource('manifest2', { bar: 123 }, { boor: [ 1, 2, 3 ] }); + + // THEN + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: "[{\"foo\":123}]" + })); + + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: "[{\"bar\":123},{\"boor\":[1,2,3]}]" + })); + + test.done(); + }, + + 'when kubectl is enabled (default) adding capacity will automatically map its IAM role'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const 
cluster = new eks.Cluster(stack, 'Cluster', { vpc }); + + // WHEN + cluster.addCapacity('default', { + instanceType: new ec2.InstanceType('t2.nano'), + }); - return { stack, vpc, app }; -} + // THEN + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: { + "Fn::Join": [ + "", + [ + "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"aws-auth\",\"namespace\":\"kube-system\"},\"data\":{\"mapRoles\":\"[{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "ClusterdefaultInstanceRoleF20A29CD", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" + ] + ] + } + })); + + test.done(); + }, + + 'addCapacity will *not* map the IAM role if mapRole is false'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc }); + + // WHEN + cluster.addCapacity('default', { + instanceType: new ec2.InstanceType('t2.nano'), + mapRole: false + }); + + // THEN + expect(stack).to(not(haveResource(KubernetesResource.RESOURCE_TYPE))); + test.done(); + }, + + 'addCapacity will *not* map the IAM role if kubectl is disabled'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + const cluster = new eks.Cluster(stack, 'Cluster', { vpc, kubectlEnabled: false }); + + // WHEN + cluster.addCapacity('default', { + instanceType: new ec2.InstanceType('t2.nano') + }); + + // THEN + expect(stack).to(not(haveResource(KubernetesResource.RESOURCE_TYPE))); + test.done(); + } + +}; diff --git a/packages/@aws-cdk/aws-eks/test/test.manifest.ts b/packages/@aws-cdk/aws-eks/test/test.manifest.ts new file mode 100644 index 0000000000000..95112ac027f42 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/test.manifest.ts @@ -0,0 +1,79 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Vpc } from '@aws-cdk/aws-ec2'; +import { Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { Cluster, KubernetesResource } from '../lib'; + +// tslint:disable:max-line-length + +export = { + 'basic usage'(test: Test) { + // GIVEN + const stack = new Stack(); + const vpc = new Vpc(stack, 'vpc'); + const cluster = new Cluster(stack, 'cluster', { vpc }); + + const manifest = [ + { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: 'hello-kubernetes', + }, + spec: { + type: 'LoadBalancer', + ports: [ + { port: 80, targetPort: 8080 } + ], + selector: { + app: 'hello-kubernetes' + } + } + }, + { + apiVersion: 'apps/v1', + kind: 'Deployment', + metadata: { + name: 'hello-kubernetes' + }, + spec: { + replicas: 2, + selector: { + matchLabels: { + app: 'hello-kubernetes' + } + }, + template: { + metadata: { + labels: { + app: 'hello-kubernetes' + } + }, + spec: { + containers: [ + { + name: 'hello-kubernetes', + image: 'paulbouwer/hello-kubernetes:1.5', + ports: [ + { containerPort: 8080 } + ] + } + ] + } + } + } + } + ]; + + // WHEN + new KubernetesResource(stack, 'manifest', { + cluster, + manifest + }); + + expect(stack).to(haveResource(KubernetesResource.RESOURCE_TYPE, { + Manifest: JSON.stringify(manifest) + })); + test.done(); + } +}; \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/util.ts b/packages/@aws-cdk/aws-eks/test/util.ts new file mode 100644 index 0000000000000..12065b26588f3 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/test/util.ts @@ -0,0 +1,10 @@ +import ec2 = require('@aws-cdk/aws-ec2'); +import { App, Stack } from 
'@aws-cdk/core'; + +export function testFixture() { + const app = new App(); + const stack = new Stack(app, 'Stack', { env: { region: 'us-east-1' }}); + const vpc = new ec2.Vpc(stack, 'VPC'); + + return { stack, vpc, app }; +}