diff --git a/core/controlplane/config/encrypted_assets.go b/core/controlplane/config/encrypted_assets.go
index b016704e7..09d16f0ce 100644
--- a/core/controlplane/config/encrypted_assets.go
+++ b/core/controlplane/config/encrypted_assets.go
@@ -440,11 +440,6 @@ func ReadOrEncryptAssets(dirname string, manageCertificates bool, caKeyRequiredO
 }
 
 func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error {
-	workerCAKeyDefaultSymlinkTo := ""
-	if includeCAKey {
-		workerCAKeyDefaultSymlinkTo = "ca-key.pem"
-	}
-
 	assets := []struct {
 		name string
 		data []byte
@@ -453,7 +448,6 @@ func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error
 	}{
 		{"ca.pem", r.CACert, true, ""},
 		{"worker-ca.pem", r.WorkerCACert, true, "ca.pem"},
-		{"worker-ca-key.pem", r.WorkerCAKey, true, workerCAKeyDefaultSymlinkTo},
 		{"apiserver.pem", r.APIServerCert, true, ""},
 		{"apiserver-key.pem", r.APIServerKey, true, ""},
 		{"worker.pem", r.WorkerCert, true, ""},
@@ -480,6 +474,13 @@ func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error
 			overwrite        bool
 			ifEmptySymlinkTo string
 		}{"ca-key.pem", r.CAKey, true, ""})
+
+		assets = append(assets, struct {
+			name             string
+			data             []byte
+			overwrite        bool
+			ifEmptySymlinkTo string
+		}{"worker-ca-key.pem", r.WorkerCAKey, true, "ca-key.pem"})
 	}
 
 	for _, asset := range assets {
diff --git a/core/controlplane/config/templates/cloud-config-controller b/core/controlplane/config/templates/cloud-config-controller
index 000f2eae2..c0757ca51 100644
--- a/core/controlplane/config/templates/cloud-config-controller
+++ b/core/controlplane/config/templates/cloud-config-controller
@@ -781,7 +781,20 @@ write_files:
         #!/bin/bash -e
 
         kubectl() {
-          /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl "$@"
+          # --request-timeout=1s instructs kubectl to give up on discovering unresponsive apiservice(s) within a short period,
+          # so that temporary flakiness/unresponsiveness of a specific apiservice before the apiserver/controller-manager have
+          # fully started doesn't stall the whole controller bootstrap process.
+          /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl --request-timeout=1s "$@"
+        }
+
+        ks() {
+          kubectl --namespace kube-system "$@"
+        }
+
+        # Batch as many files as possible into a single apply, to reduce the total delay caused by wilderness in the API aggregation.
+        # See https://github.com/kubernetes-incubator/kube-aws/issues/1039
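+        # For example, `applyall "${mfdir}/a.yaml" "${mfdir}/b.yaml"` results in a single
+        # `kubectl apply -f ${mfdir}/a.yaml,${mfdir}/b.yaml` call instead of one apply per manifest.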
+        applyall() {
+          kubectl apply -f $(echo "$@" | tr ' ' ',')
         }
 
         while ! kubectl get ns kube-system; do
@@ -789,100 +802,99 @@ write_files:
           sleep 3
         done
 
+        # See https://github.com/kubernetes-incubator/kube-aws/issues/1039#issuecomment-348978375
+        if ks get apiservice v1beta1.metrics.k8s.io && ! ps ax | grep '[h]yperkube proxy'; then
+          echo "apiserver is up but kube-proxy isn't up. We have likely encountered #1039."
+          echo "Temporarily deleting the v1beta1.metrics.k8s.io apiservice as a work-around for #1039"
+          ks delete apiservice v1beta1.metrics.k8s.io
+
+          echo Waiting until controller-manager stabilizes and creates a kube-proxy pod.
+          until ps ax | grep '[h]yperkube proxy'; do
+            echo Sleeping 3 seconds.
+            sleep 3
+          done
+          echo kube-proxy started. apiserver should be responsive again.
+        fi
+
         mfdir=/srv/kubernetes/manifests
+        rbac=/srv/kubernetes/rbac
 
 {{ if .UseCalico }}
         /bin/bash /opt/bin/populate-tls-calico-etcd
-        kubectl apply -f "${mfdir}/calico.yaml"
+        applyall "${mfdir}/calico.yaml"
 {{ end }}
 
+        {{ if .Addons.MetricsServer.Enabled -}}
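+        # The apisvc manifest is listed last so that the v1beta1.metrics.k8s.io APIService
+        # is registered only after its backing Deployment and Service have been applied
+        # (presumably to avoid the unresponsive-apiservice window described in #1039).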
"${mfdir}/cluster-role-bindings/kubernetes-dashboard-admin.yaml" {{- end }} # Roles and bindings - for manifest in {pod-nanny,kubernetes-dashboard}; do - kubectl apply -f "${mfdir}/roles/$manifest.yaml" - done - for manifest in {heapster-nanny,kubernetes-dashboard,metrics-server}; do - kubectl apply -f "${mfdir}/role-bindings/$manifest.yaml" - done + applyall "${mfdir}/roles"/{pod-nanny,kubernetes-dashboard}".yaml" + + applyall "${mfdir}/role-bindings"/{heapster-nanny,kubernetes-dashboard}".yaml" {{ if .Experimental.TLSBootstrap.Enabled }} - for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do - kubectl apply -f "${mfdir}/cluster-roles/$manifest.yaml" - done + applyall "${mfdir}/cluster-roles"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml" - for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do - kubectl apply -f "${mfdir}/cluster-role-bindings/$manifest.yaml" - done + applyall "${mfdir}/cluster-role-bindings"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml" {{ end }} {{if .Experimental.Kube2IamSupport.Enabled }} mfdir=/srv/kubernetes/manifests - kubectl apply -f "${mfdir}/kube2iam-rbac.yaml" - kubectl apply -f "${mfdir}/kube2iam-ds.yaml"; + applyall "${mfdir}/kube2iam-rbac.yaml" + applyall "${mfdir}/kube2iam-ds.yaml"; {{ end }} - path: /etc/kubernetes/cni/docker_opts_cni.env diff --git a/core/controlplane/config/templates/cluster.yaml b/core/controlplane/config/templates/cluster.yaml index 0b469cb2d..0a18c6d37 100644 --- a/core/controlplane/config/templates/cluster.yaml +++ b/core/controlplane/config/templates/cluster.yaml @@ -1194,6 +1194,10 @@ addons: rescheduler: enabled: false + # Metrics Server (https://github.com/kubernetes-incubator/metrics-server) + metricsServer: + enabled: false + # Experimental features will change in backward-incompatible ways experimental: # Enable admission controllers diff --git a/core/controlplane/config/templates/stack-template.json b/core/controlplane/config/templates/stack-template.json index 7cb603041..2fccae7a9 100644 --- a/core/controlplane/config/templates/stack-template.json +++ b/core/controlplane/config/templates/stack-template.json @@ -554,7 +554,7 @@ }], "HostedZoneTags" : [{ "Key": "kubernetes.io/cluster/{{$.ClusterName}}", - "Value": "true" + "Value": "owned" }] } }, @@ -664,7 +664,7 @@ { "Key": "kubernetes.io/cluster/{{$.ClusterName}}", "PropagateAtLaunch": "true", - "Value": "true" + "Value": "owned" }, { "Key": "Name", @@ -1618,7 +1618,7 @@ "Tags": [ { "Key": "kubernetes.io/cluster/{{.ClusterName}}", - "Value": "true" + "Value": "owned" }, { "Key": "Name", diff --git a/core/nodepool/config/templates/stack-template.json b/core/nodepool/config/templates/stack-template.json index 6735deaea..1e5a92496 100644 --- a/core/nodepool/config/templates/stack-template.json +++ b/core/nodepool/config/templates/stack-template.json @@ -144,7 +144,7 @@ { "Key": "kubernetes.io/cluster/{{ .ClusterName }}", "PropagateAtLaunch": "true", - "Value": "true" + "Value": "owned" }, { "Key": "kube-aws:node-pool:name", diff --git a/core/root/config/config.go b/core/root/config/config.go index 64518c2c6..b8479352a 100644 --- a/core/root/config/config.go +++ b/core/root/config/config.go @@ -144,6 +144,7 @@ func ConfigFromBytes(data []byte, plugins []*pluginmodel.Plugin) (*Config, error {c.Addons, "addons"}, {c.Addons.Rescheduler, "addons.rescheduler"}, {c.Addons.ClusterAutoscaler, "addons.clusterAutoscaler"}, + {c.Addons.MetricsServer, "addons.metricsServer"}, } for i, np := range c.Worker.NodePools { diff --git 
+  metricsServer:
+    enabled: false
+
 # Experimental features will change in backward-incompatible ways
 experimental:
   # Enable admission controllers
diff --git a/core/controlplane/config/templates/stack-template.json b/core/controlplane/config/templates/stack-template.json
index 7cb603041..2fccae7a9 100644
--- a/core/controlplane/config/templates/stack-template.json
+++ b/core/controlplane/config/templates/stack-template.json
@@ -554,7 +554,7 @@
         }],
         "HostedZoneTags" : [{
           "Key": "kubernetes.io/cluster/{{$.ClusterName}}",
-          "Value": "true"
+          "Value": "owned"
         }]
       }
     },
@@ -664,7 +664,7 @@
           {
             "Key": "kubernetes.io/cluster/{{$.ClusterName}}",
             "PropagateAtLaunch": "true",
-            "Value": "true"
+            "Value": "owned"
           },
           {
             "Key": "Name",
@@ -1618,7 +1618,7 @@
         "Tags": [
           {
             "Key": "kubernetes.io/cluster/{{.ClusterName}}",
-            "Value": "true"
+            "Value": "owned"
           },
           {
             "Key": "Name",
diff --git a/core/nodepool/config/templates/stack-template.json b/core/nodepool/config/templates/stack-template.json
index 6735deaea..1e5a92496 100644
--- a/core/nodepool/config/templates/stack-template.json
+++ b/core/nodepool/config/templates/stack-template.json
@@ -144,7 +144,7 @@
           {
             "Key": "kubernetes.io/cluster/{{ .ClusterName }}",
             "PropagateAtLaunch": "true",
-            "Value": "true"
+            "Value": "owned"
           },
           {
             "Key": "kube-aws:node-pool:name",
diff --git a/core/root/config/config.go b/core/root/config/config.go
index 64518c2c6..b8479352a 100644
--- a/core/root/config/config.go
+++ b/core/root/config/config.go
@@ -144,6 +144,7 @@ func ConfigFromBytes(data []byte, plugins []*pluginmodel.Plugin) (*Config, error
 		{c.Addons, "addons"},
 		{c.Addons.Rescheduler, "addons.rescheduler"},
 		{c.Addons.ClusterAutoscaler, "addons.clusterAutoscaler"},
+		{c.Addons.MetricsServer, "addons.metricsServer"},
 	}
 
 	for i, np := range c.Worker.NodePools {
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 4730c2d9c..dd01df1e0 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -1,6 +1,7 @@
 # Summary
 
 * [Home](README.md)
+* [Quick Start](tutorials/quick-start.md)
 * [Getting Started](getting-started/README.md)
   * [Prerequisites](getting-started/prerequisites.md)
   * [Step 1: Configure](getting-started/step-1-configure.md)
@@ -20,10 +21,10 @@
   * [Developer Guide](guides/developer-guide.md)
   * [Operator Guide](guides/operator-guide.md)
 * [Advanced Topics](advanced-topics/README.md)
-  * [etcd Backup & Restore](advanced-topics/etcd-backup-and-restore.md)
   * [CloudFormation Updates in CLI](advanced-topics/cloudformation-updates-in-cli.md)
+  * [etcd Backup & Restore](advanced-topics/etcd-backup-and-restore.md)
+  * [Kubernetes Dashboard Access](advanced-topics/kubernetes-dashboard.md)
   * [Use An Existing VPC](advanced-topics/use-an-existing-vpc.md)
 * [Troubleshooting](troubleshooting/README.md)
   * [Known Limitations](troubleshooting/known-limitations.md)
   * [Common Problems](troubleshooting/common-problems.md)
-* [Quick Start \(WIP\)](tutorials/quick-start.md)
diff --git a/docs/advanced-topics/README.md b/docs/advanced-topics/README.md
index b51583112..96f45cfe0 100644
--- a/docs/advanced-topics/README.md
+++ b/docs/advanced-topics/README.md
@@ -1,6 +1,6 @@
 # Advanced Topics
 
-* [etcd Backup & Restore](etcd-backup-and-restore.md) - how to backup and restore etcd either manually or automatically
 * [CloudFormation Streaming](cloudformation-updates-in-cli.md) - stream CloudFormation updates during CLI commands `kube-aws up` and `kube-aws update`
+* [etcd Backup & Restore](etcd-backup-and-restore.md) - how to backup and restore etcd either manually or automatically
+* [Kubernetes Dashboard Access](kubernetes-dashboard.md) - how to expose and access the Kubernetes Dashboard
 * [Use An Existing VPC](use-an-existing-vpc.md) - how to deploy a Kubernetes cluster to an existing VPC
-* [Kubernetes Dashboard Access and Authentication](kubernetes-dashboard.md) - how to expose and access the Kubernetes Dashboard
diff --git a/docs/advanced-topics/high-availability.md b/docs/advanced-topics/high-availability.md
new file mode 100644
index 000000000..f0da1a5c7
--- /dev/null
+++ b/docs/advanced-topics/high-availability.md
@@ -0,0 +1,12 @@
+# High Availability
+
+To achieve high availability with kube-aws, it is recommended to (see the example configuration at the end of this page):
+
+* Specify at least 3 for `etcd.count` in `cluster.yaml`. See [Optimal Cluster Size](https://coreos.com/etcd/docs/latest/v2/admin_guide.html#optimal-cluster-size) for details of the etcd recommendations.
+* Specify at least 2 for `controller.count` in `cluster.yaml`.
+* Use 2 or more worker nodes.
+* Avoid `t2.medium` or smaller instances for etcd and controller nodes. See [this issue](https://github.com/kubernetes-incubator/kube-aws/issues/138) for some additional discussion.
+
+# Additional Reading
+
+There is some additional documentation about [Building High-Availability Clusters](https://kubernetes.io/docs/admin/high-availability/) on the main Kubernetes documentation site. Although kube-aws takes care of most of those concerns for you, it can be worth a read for a deeper understanding.
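+
+# Example Configuration
+
+A minimal, illustrative `cluster.yaml` excerpt reflecting the count recommendations above (the pool name is an example; choose instance types per the guidance above):
+
+```yaml
+etcd:
+  count: 3
+controller:
+  count: 2
+worker:
+  nodePools:
+    - name: pool1
+      count: 2
+```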
diff --git a/docs/cli-reference/README.md b/docs/cli-reference/README.md
index 21ddb833e..0f041545d 100644
--- a/docs/cli-reference/README.md
+++ b/docs/cli-reference/README.md
@@ -15,6 +15,7 @@ Initialize the base configuration for a cluster ready for customization prior to
 | `hosted-zone-id` | The hosted zone in which a Route53 record set for a k8s API endpoint is created | none |
 | `key-name` | The AWS key-pair for SSH access to nodes | none |
 | `kms-key-arn` | The ARN of the AWS KMS key for encrypting TLS assets |
+| `no-record-set` | Instruct kube-aws to not manage Route53 record sets for your K8S API | `false` |
 | `region` | The AWS region to deploy to | none |
 
 ### `init` example
@@ -22,9 +23,10 @@ Initialize the base configuration for a cluster ready for customization prior to
 
 ```bash
 $ kube-aws init \
   --cluster-name=my-cluster \
-  --external-dns-name=my-cluster-endpoint.mydomain.com \
   --region=us-west-1 \
   --availability-zone=us-west-1c \
+  --hosted-zone-id=xxxxxxxxxxxxxx \
+  --external-dns-name=my-cluster-endpoint.mydomain.com \
   --key-name=key-pair-name \
   --kms-key-arn="arn:aws:kms:us-west-1:xxxxxxxxxx:key/xxxxxxxxxxxxxxxxxxx"
 ```
diff --git a/docs/tutorials/quick-start.md b/docs/tutorials/quick-start.md
index 0fa9f6315..7e3b0bfb3 100644
--- a/docs/tutorials/quick-start.md
+++ b/docs/tutorials/quick-start.md
@@ -1,125 +1,131 @@
 # Quick Start
 
-Deploy a fully-functional Kubernetes cluster using AWS CloudFormation.
+Get started with kube-aws and deploy a fully-functional Kubernetes cluster running on CoreOS Container Linux using AWS CloudFormation.
 
-Your cluster will be configured to use AWS features to enhance Kubernetes.
+After completing this guide, you will be able to deploy applications to Kubernetes on AWS and interact with the Kubernetes API using the `kubectl` CLI tool.
 
-For example, Kubernetes may automatically provision an Elastic Load Balancer for each Kubernetes Service.
+# Pre-requisites
 
-After completing this guide, a deployer will be able to interact with the Kubernetes API from their workstation using the `kubectl` CLI tool.
+Prior to setting up your first Kubernetes cluster using kube-aws, you will need to set up the following. More details on each pre-requisite are available in the rest of the documentation.
 
-# Pre-requisites {#pre-requisites}
+1. [Install](http://docs.aws.amazon.com/cli/latest/userguide/installing.html) and [Configure](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) the AWS CLI
+1. [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the CLI for controlling a Kubernetes cluster
+1. Create an [EC2 Key Pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in your chosen AWS region and record the name of the key for step 2 in this guide
+1. Have a Route 53 hosted zone ready to expose the Kubernetes API and record the hosted zone ID and domain name for step 2 in this guide
+1. Create a [KMS Key](http://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html) in your chosen AWS region and record the ARN for step 2 in this guide
+1. Create an [S3 Bucket](http://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html) to store kube-aws assets and record the bucket name for steps 2 and 3 in this guide
 
-If you're deploying a cluster with kube-aws:
+# Step 1: Download kube-aws
 
-* [EC2 instances whose types are larger than or equal to `t2.medium` should be chosen for the cluster to work reliably](https://github.com/kubernetes-incubator/kube-aws/issues/138)
-* [At least 3 etcd, 2 controller, 2 worker nodes are required to achieve high availability](https://github.com/kubernetes-incubator/kube-aws/issues/138#issuecomment-266432162)
-* If you wish to deploy to an existing VPC, there is additional information on [Use An Existing VPC](/advanced-topics/use-an-existing-vpc.md) not covered by this getting started guide.
+Go to the [releases](https://github.com/kubernetes-incubator/kube-aws/releases) and download the latest release tarball for your architecture. Extract the binary and add kube-aws to your path:
 
-Once you understand the pre-requisites, you are ready to launch your first Kubernetes cluster.
-
-# Step 1: Configure {#step1}
-
-Step 1 will cover:
-
-* Downloading kube-aws
-* Defining account and cluster settings
-
-## Download kube-aws
-
-Go to the [releases](https://github.com/kubernetes-incubator/kube-aws/releases) and download the latest release tarball for your architecture. Extract the binary:
-
-```
-tar zxvf kube-aws-${PLATFORM}.tar.gz
+```bash
+➜ tar zxvf kube-aws-${PLATFORM}.tar.gz
+➜ sudo mv ${PLATFORM}/kube-aws /usr/local/bin
+➜ kube-aws --help
 ```
 
-Add kube-aws to your path:
+# Step 2: Render
 
+First run `init` using the information from the pre-requisites section. For example:
+
+```bash
+➜ kube-aws init \
+  --cluster-name=quick-start-k8 \
+  --region=us-west-1 \
+  --availability-zone=us-west-1a \
+  --hosted-zone-id=ZBN159WIK8JJD \
+  --external-dns-name=quick-start-k8s.mycompany.com \
+  --key-name=ec2-key-pair-name \
+  --kms-key-arn="arn:aws:kms:us-west-1:123456789012:key/c4f79cb0-f9fb-434a-ac3c-47c5697d51e6"
 ```
-mv ${PLATFORM}/kube-aws /usr/local/bin
-```
-
-## Configure AWS credentials
-Configure your local workstation with AWS credentials using one of the following methods:
+This will generate a `cluster.yaml` file, which forms the main configuration for your new cluster. `cluster.yaml` has many options for adjusting your cluster; leave them as the defaults for now.
 
-**Method 1: Configure command**
+Next use `render credentials` to generate new credentials for your cluster into the `credentials` directory:
 
-Provide the values of your AWS access and secret keys, and optionally default region and output format:
-
-```
-$ aws configure
-AWS Access Key ID [None]: AKID1234567890
-AWS Secret Access Key [None]: MY-SECRET-KEY
-Default region name [None]: us-west-2
-Default output format [None]: text
+```bash
+➜ kube-aws render credentials --generate-ca
 ```
 
-**Method 2: Config file**
+The files generated are TLS assets which allow communication between nodes and also allow super admins to administer the cluster. After the quick start, you may wish to use your own CA assets.
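+
+For example, listing the directory shows the generated TLS assets (abridged and illustrative; the exact set of files depends on your configuration):
+
+```bash
+➜ ls credentials/
+apiserver-key.pem  apiserver.pem  ca-key.pem  ca.pem  worker-ca-key.pem  worker-ca.pem  worker.pem
+```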
 
-Write your credentials into the file \`~/.aws/credentials\` using the following template:
+Next use `render stack` to generate the CloudFormation stack templates and user data into the `stack-templates` and `userdata` directories:
 
-```
-[default]
-aws_access_key_id = AKID1234567890
-aws_secret_access_key = MY-SECRET-KEY
+```bash
+➜ kube-aws render stack
 ```
 
-**Method 3: Environment variables**
+The files generated form the basis of the deployment.
 
-Provide AWS credentials to kube-aws by exporting the following environment variables:
+Before we move on to deploying, let's run `validate` to check the work above, using the S3 bucket name from the pre-requisites section. For example:
 
-```
-export AWS_ACCESS_KEY_ID=AKID1234567890
-export AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY
+```bash
+➜ kube-aws validate --s3-uri=s3://kube-aws-assets/
 ```
 
-## Test Credentials
+# Step 3: Launch
 
-Test that your credentials work by describing any instances you may already have running on your account:
+Now that you've generated and validated the various assets needed to launch a new cluster, let's run the deploy! Run `up` using the S3 bucket name from the pre-requisites section. For example:
 
-```
-$ aws ec2 describe-instances
+```bash
+➜ kube-aws up --s3-uri=s3://kube-aws-assets/
 ```
 
-# Step 2: Render
+# Step 4: Deploy an Application
 
-Step 2 will cover:
+Let's deploy our first application to the new cluster; nginx is easy to start with:
 
-* Compiling a re-usable CloudFormation template for the cluster
-* Optionally adjust template configuration
-* Validate the rendered CloudFormation stack
+```bash
+➜ export KUBECONFIG=$PWD/kubeconfig
+➜ kubectl run quick-start-nginx --image=nginx --port=80
+deployment "quick-start-nginx" created
 
-# Step 3: Launch
+➜ kubectl get pods
+NAME                                 READY     STATUS    RESTARTS   AGE
+quick-start-nginx-6687bdfc67-6qsr8   1/1       Running   0          10s
+```
 
-Step 3 will cover:
+You can see above that the pod is running and ready. To try it out, we can forward a local port to the pod:
 
-* Create the CloudFormation stack and start our EC2 machines
-* Set up CLI access to the new cluster
+```bash
+➜ kubectl port-forward $(kubectl get pods -l "run=quick-start-nginx" -o jsonpath="{.items[0].metadata.name}") 8080:80
+Forwarding from 127.0.0.1:8080 -> 80
+```
 
+Then load the nginx home page in a browser:
 
-# Step 4: Update
+```bash
+➜ open http://localhost:8080/
+```
 
-* Update the CloudFormation stack
+You should see a `Welcome to nginx!` page.
 
-# Step 5: Add Node Pool
+If you'd like to try exposing a public load balancer, first run:
 
-Step 5 will cover:
+```bash
+➜ kubectl expose deployment quick-start-nginx --port=80 --type=LoadBalancer
+```
 
-* Create the additional pool of worker nodes
-* Adjust template configuration for each pool of worker nodes
-* Required to support [cluster-autoscaler](https://github.com/kubernetes/contrib/tree/master/cluster-autoscaler)
+Wait a few seconds for Kubernetes to create an AWS ELB to expose the service, and then run:
 
-# Step 6: Configure Add-ons
+```bash
+➜ open http://$(kubectl get svc quick-start-nginx -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+```
 
-Step 6 will cover:
+You should see the same `Welcome to nginx!` page as above.
 
-* Configure various Kubernetes add-ons
+The above commands demonstrate some basic imperative `kubectl` commands for creating Kubernetes Deployment and Service objects. Declarative object configuration is also available; for more information, see [Kubernetes Object Management](https://kubernetes.io/docs/tutorials/object-management-kubectl/object-management/).
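+
+As a minimal, hypothetical illustration of the declarative style, the same Deployment could instead be described in a manifest file and applied with `kubectl apply` (the file name and exact API version are examples only):
+
+```yaml
+# quick-start-nginx.yaml -- apply with: kubectl apply -f quick-start-nginx.yaml
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: quick-start-nginx
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        run: quick-start-nginx
+    spec:
+      containers:
+      - name: quick-start-nginx
+        image: nginx
+        ports:
+        - containerPort: 80
+```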
 
-# Step 7: Destroy
+# Step 5: Tear Down
 
-Step 7 will cover:
+Once you no longer need the quick start cluster created during this guide, tear it down:
 
-* Tearing down the cluster
+```bash
+➜ kubectl delete svc quick-start-nginx
+service "quick-start-nginx" deleted
 
-Let's get started.
+➜ kube-aws destroy
+```
+
+The first command deletes the Service object created in step 4 so that the AWS ELB is removed; otherwise, the network interface attachments may block the CloudFormation stack from being deleted.
\ No newline at end of file
diff --git a/model/addons.go b/model/addons.go
index 347de54bb..e6b6dbeea 100644
--- a/model/addons.go
+++ b/model/addons.go
@@ -3,6 +3,7 @@ package model
 type Addons struct {
 	Rescheduler       Rescheduler              `yaml:"rescheduler"`
 	ClusterAutoscaler ClusterAutoscalerSupport `yaml:"clusterAutoscaler,omitempty"`
+	MetricsServer     MetricsServer            `yaml:"metricsServer,omitempty"`
 	UnknownKeys `yaml:",inline"`
 }
 
@@ -15,3 +16,8 @@ type Rescheduler struct {
 	Enabled bool `yaml:"enabled"`
 	UnknownKeys `yaml:",inline"`
 }
+
+type MetricsServer struct {
+	Enabled bool `yaml:"enabled"`
+	UnknownKeys `yaml:",inline"`
+}
diff --git a/test/integration/maincluster_test.go b/test/integration/maincluster_test.go
index c7c6510fc..4d6e33ab1 100644
--- a/test/integration/maincluster_test.go
+++ b/test/integration/maincluster_test.go
@@ -405,6 +405,8 @@ addons:
     enabled: true
   clusterAutoscaler:
     enabled: true
+  metricsServer:
+    enabled: true
 worker:
   nodePools:
   - name: pool1
@@ -420,6 +422,9 @@ worker:
 		ClusterAutoscaler: model.ClusterAutoscalerSupport{
 			Enabled: true,
 		},
+		MetricsServer: model.MetricsServer{
+			Enabled: true,
+		},
 	}
 
 	actual := c.Addons