Skip to content

Commit

Permalink
fix(eks): kubectl provider out-of-memory for large manifests/charts (…
Browse files Browse the repository at this point in the history
…now 1GiB) (#11957)

Increase the default memory size of the kubectl provider's lambda function to 1GiB and introduce a `kubectlMemory` option that can be used to control memory allocation if needed.

Fixes #11787


----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
  • Loading branch information
Elad Ben-Israel authored Dec 9, 2020
1 parent 6f306ad commit 2ec2948
Show file tree
Hide file tree
Showing 4 changed files with 97 additions and 9 deletions.
25 changes: 24 additions & 1 deletion packages/@aws-cdk/aws-eks/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,11 @@ The `ClusterHandler` is a Lambda function responsible for interacting with the EKS API in

### Kubectl Support

The resources are created in the cluster by running `kubectl apply` from a python lambda function. You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
The resources are created in the cluster by running `kubectl apply` from a python lambda function.

#### Environment

You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an HTTP proxy:

```ts
const cluster = new eks.Cluster(this, 'hello-eks', {
Expand All @@ -461,6 +465,8 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
});
```

#### Runtime

By default, the `kubectl`, `helm` and `aws` commands used to operate the cluster are provided by an AWS Lambda Layer from the AWS Serverless Application in [aws-lambda-layer-kubectl](https://github.com/aws-samples/aws-lambda-layer-kubectl). In most cases this should be sufficient.

You can provide a custom layer in case the default layer does not meet your
Expand Down Expand Up @@ -496,6 +502,23 @@ const cluster = eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
> Instructions on how to build `layer.zip` can be found
> [here](https://github.com/aws-samples/aws-lambda-layer-kubectl/blob/master/cdk/README.md).
#### Memory

By default, the kubectl provider is configured with 1024MiB of memory. You can use the `kubectlMemory` option to specify the memory size for the AWS Lambda function:

```ts
import { Size } from '@aws-cdk/core';

new eks.Cluster(this, 'MyCluster', {
kubectlMemory: Size.gibibytes(4)
});

// or
eks.Cluster.fromClusterAttributes(this, 'MyCluster', {
kubectlMemory: Size.gibibytes(4)
});
```

### ARM64 Support

Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest
Expand Down
34 changes: 30 additions & 4 deletions packages/@aws-cdk/aws-eks/lib/cluster.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import * as iam from '@aws-cdk/aws-iam';
import * as kms from '@aws-cdk/aws-kms';
import * as lambda from '@aws-cdk/aws-lambda';
import * as ssm from '@aws-cdk/aws-ssm';
import { Annotations, CfnOutput, CfnResource, IResource, Resource, Stack, Tags, Token, Duration } from '@aws-cdk/core';
import { Annotations, CfnOutput, CfnResource, IResource, Resource, Stack, Tags, Token, Duration, Size } from '@aws-cdk/core';
import { Construct, Node } from 'constructs';
import * as YAML from 'yaml';
import { AwsAuth } from './aws-auth';
Expand Down Expand Up @@ -92,22 +92,21 @@ export interface ICluster extends IResource, ec2.IConnectable {

/**
* Custom environment variables when running `kubectl` against this cluster.
* @default - no additional environment variables
*/
readonly kubectlEnvironment?: { [key: string]: string };

/**
* A security group to use for `kubectl` execution.
*
* @default - If not specified, the k8s endpoint is expected to be accessible
* If this is undefined, the k8s endpoint is expected to be accessible
* publicly.
*/
readonly kubectlSecurityGroup?: ec2.ISecurityGroup;

/**
* Subnets to host the `kubectl` compute resources.
*
* @default - If not specified, the k8s endpoint is expected to be accessible
* If this is undefined, the k8s endpoint is expected to be accessible
* publicly.
*/
readonly kubectlPrivateSubnets?: ec2.ISubnet[];
Expand All @@ -119,6 +118,10 @@ export interface ICluster extends IResource, ec2.IConnectable {
*/
readonly kubectlLayer?: lambda.ILayerVersion;

/**
* Amount of memory to allocate to the provider's lambda function.
*/
readonly kubectlMemory?: Size;
/**
* Creates a new service account with corresponding IAM Role (IRSA).
*
Expand Down Expand Up @@ -271,6 +274,13 @@ export interface ClusterAttributes {
* @see https://github.com/aws-samples/aws-lambda-layer-kubectl
*/
readonly kubectlLayer?: lambda.ILayerVersion;

/**
* Amount of memory to allocate to the provider's lambda function.
*
* @default Size.gibibytes(1)
*/
readonly kubectlMemory?: Size;
}

/**
Expand Down Expand Up @@ -416,6 +426,13 @@ export interface ClusterOptions extends CommonClusterOptions {
* @see https://github.com/aws-samples/aws-lambda-layer-kubectl
*/
readonly kubectlLayer?: lambda.ILayerVersion;

/**
* Amount of memory to allocate to the provider's lambda function.
*
* @default Size.gibibytes(1)
*/
readonly kubectlMemory?: Size;
}

/**
Expand Down Expand Up @@ -630,6 +647,7 @@ abstract class ClusterBase extends Resource implements ICluster {
public abstract readonly kubectlEnvironment?: { [key: string]: string };
public abstract readonly kubectlSecurityGroup?: ec2.ISecurityGroup;
public abstract readonly kubectlPrivateSubnets?: ec2.ISubnet[];
public abstract readonly kubectlMemory?: Size;
public abstract readonly openIdConnectProvider: iam.IOpenIdConnectProvider;

/**
Expand Down Expand Up @@ -842,6 +860,11 @@ export class Cluster extends ClusterBase {
*/
public readonly kubectlLayer?: lambda.ILayerVersion;

/**
* The amount of memory allocated to the kubectl provider's lambda function.
*/
public readonly kubectlMemory?: Size;

/**
* If this cluster is kubectl-enabled, returns the `ClusterResource` object
* that manages it. If this cluster is not kubectl-enabled (i.e. uses the
Expand Down Expand Up @@ -929,6 +952,7 @@ export class Cluster extends ClusterBase {
this.endpointAccess = props.endpointAccess ?? EndpointAccess.PUBLIC_AND_PRIVATE;
this.kubectlEnvironment = props.kubectlEnvironment;
this.kubectlLayer = props.kubectlLayer;
this.kubectlMemory = props.kubectlMemory;

const privateSubents = this.selectPrivateSubnets().slice(0, 16);
const publicAccessDisabled = !this.endpointAccess._config.publicAccess;
Expand Down Expand Up @@ -1630,6 +1654,7 @@ class ImportedCluster extends ClusterBase {
public readonly kubectlSecurityGroup?: ec2.ISecurityGroup | undefined;
public readonly kubectlPrivateSubnets?: ec2.ISubnet[] | undefined;
public readonly kubectlLayer?: lambda.ILayerVersion;
public readonly kubectlMemory?: Size;

constructor(scope: Construct, id: string, private readonly props: ClusterAttributes) {
super(scope, id);
Expand All @@ -1641,6 +1666,7 @@ class ImportedCluster extends ClusterBase {
this.kubectlEnvironment = props.kubectlEnvironment;
this.kubectlPrivateSubnets = props.kubectlPrivateSubnetIds ? props.kubectlPrivateSubnetIds.map((subnetid, index) => ec2.Subnet.fromSubnetId(this, `KubectlSubnet${index}`, subnetid)) : undefined;
this.kubectlLayer = props.kubectlLayer;
this.kubectlMemory = props.kubectlMemory;

let i = 1;
for (const sgid of props.securityGroupIds ?? []) {
Expand Down
3 changes: 2 additions & 1 deletion packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ export class KubectlProvider extends NestedStack {
}

const layer = cluster.kubectlLayer ?? getOrCreateKubectlLayer(this);
const memorySize = cluster.kubectlMemory ? cluster.kubectlMemory.toMebibytes() : 1024;

const handler = new lambda.Function(this, 'Handler', {
code: lambda.Code.fromAsset(path.join(__dirname, 'kubectl-handler')),
Expand All @@ -75,7 +76,7 @@ export class KubectlProvider extends NestedStack {
timeout: Duration.minutes(15),
description: 'onEvent handler for EKS kubectl resource provider',
layers: [layer],
memorySize: 256,
memorySize,
environment: cluster.kubectlEnvironment,

// defined only when using private access
Expand Down
44 changes: 41 additions & 3 deletions packages/@aws-cdk/aws-eks/test/test.cluster.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import * as constructs from 'constructs';
import { Test } from 'nodeunit';
import * as YAML from 'yaml';
import * as eks from '../lib';
import { getOrCreateKubectlLayer } from '../lib/kubectl-provider';
import * as kubectl from '../lib/kubectl-provider';
import { BottleRocketImage } from '../lib/private/bottlerocket';
import { testFixture, testFixtureNoVpc } from './util';

Expand Down Expand Up @@ -391,7 +391,7 @@ export = {
// WHEN
const vpc = new ec2.Vpc(stack, 'VPC');
new eks.Cluster(stack, 'Cluster', { vpc, defaultCapacity: 0, version: CLUSTER_VERSION });
getOrCreateKubectlLayer(stack);
kubectl.getOrCreateKubectlLayer(stack);

// THEN
expect(stack).to(haveResource('Custom::AWSCDK-EKS-Cluster'));
Expand All @@ -411,7 +411,7 @@ export = {
// WHEN
const vpc = new ec2.Vpc(stack, 'VPC');
new eks.Cluster(stack, 'Cluster', { vpc, defaultCapacity: 0, version: CLUSTER_VERSION });
getOrCreateKubectlLayer(stack);
kubectl.getOrCreateKubectlLayer(stack);

// THEN
expect(stack).to(haveResource('Custom::AWSCDK-EKS-Cluster'));
Expand Down Expand Up @@ -2583,4 +2583,42 @@ export = {
}));
test.done();
},

'custom memory size for kubectl provider'(test: Test) {
  // GIVEN: a fresh app/stack fixture with a VPC
  const { stack, vpc, app } = testFixture();

  // WHEN: a cluster is created with a 2 GiB kubectl memory override
  new eks.Cluster(stack, 'Cluster', {
    vpc,
    version: CLUSTER_VERSION,
    kubectlMemory: cdk.Size.gibibytes(2),
  });

  // THEN: the synthesized kubectl-provider nested stack allocates 2048 MiB to the handler
  const assembly = app.synth();
  const templatePath = path.join(assembly.directory, 'StackawscdkawseksKubectlProvider7346F799.nested.template.json');
  const nestedTemplate = JSON.parse(fs.readFileSync(templatePath, 'utf-8'));
  test.equal(nestedTemplate?.Resources?.Handler886CB40B?.Properties?.MemorySize, 2048);
  test.done();
},

'custom memory size for imported clusters'(test: Test) {
  // GIVEN: a fresh app/stack fixture
  const { stack, app } = testFixture();

  // WHEN: an imported cluster carries a 4 GiB kubectl memory override and
  // receives a manifest (which forces creation of the kubectl provider)
  const imported = eks.Cluster.fromClusterAttributes(stack, 'Imported', {
    clusterName: 'my-cluster',
    kubectlRoleArn: 'arn:aws:iam::123456789012:role/MyRole',
    kubectlMemory: cdk.Size.gibibytes(4),
  });
  imported.addManifest('foo', { bar: 123 });

  // THEN: the synthesized kubectl-provider nested stack allocates 4096 MiB to the handler
  const assembly = app.synth();
  const templatePath = path.join(assembly.directory, 'StackStackImported1CBA9C50KubectlProviderAA00BA49.nested.template.json');
  const nestedTemplate = JSON.parse(fs.readFileSync(templatePath, 'utf-8'));
  test.equal(nestedTemplate?.Resources?.Handler886CB40B?.Properties?.MemorySize, 4096);
  test.done();
},
};

0 comments on commit 2ec2948

Please sign in to comment.