From ed0e70244482957423a07c6a41467aeb27b4058d Mon Sep 17 00:00:00 2001
From: June Yi
Date: Sat, 1 Dec 2018 16:31:35 +0900
Subject: [PATCH] Final Korean l10n work for dev-1.13 (#11440)

* Update outdated l10n(ko) contents (#11425)

fixes #11424

* Remove references to etcd2 in content/ko (#11416)

* Resolve conflicts against master for /ko contents (#11438)
---
 .../ko/docs/setup/custom-cloud/master.yaml    | 142 ----
 content/ko/docs/setup/custom-cloud/node.yaml  |  92 ---
 .../setup/independent/control-plane-flags.md  |  82 ---
 .../independent/create-cluster-kubeadm.md     | 621 ------------------
 .../setup/independent/high-availability.md    | 552 ----------------
 .../docs/setup/independent/install-kubeadm.md | 253 -------
 .../independent/setup-ha-etcd-with-kubeadm.md | 263 --------
 .../independent/troubleshooting-kubeadm.md    | 266 --------
 content/ko/docs/setup/pick-right-solution.md  |   2 +-
 9 files changed, 1 insertion(+), 2272 deletions(-)
 delete mode 100644 content/ko/docs/setup/custom-cloud/master.yaml
 delete mode 100644 content/ko/docs/setup/custom-cloud/node.yaml
 delete mode 100644 content/ko/docs/setup/independent/control-plane-flags.md
 delete mode 100644 content/ko/docs/setup/independent/create-cluster-kubeadm.md
 delete mode 100644 content/ko/docs/setup/independent/high-availability.md
 delete mode 100644 content/ko/docs/setup/independent/install-kubeadm.md
 delete mode 100644 content/ko/docs/setup/independent/setup-ha-etcd-with-kubeadm.md
 delete mode 100644 content/ko/docs/setup/independent/troubleshooting-kubeadm.md

diff --git a/content/ko/docs/setup/custom-cloud/master.yaml b/content/ko/docs/setup/custom-cloud/master.yaml
deleted file mode 100644
index 5b7df1bd77d70..0000000000000
--- a/content/ko/docs/setup/custom-cloud/master.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
-#cloud-config
-
----
-write-files:
-  - path: /etc/conf.d/nfs
-    permissions: '0644'
-    content: |
-      OPTS_RPC_MOUNTD=""
-  - path: /opt/bin/wupiao
-    permissions: '0755'
-    content: |
-      #!/bin/bash
-      # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen
-      [ -n "$1" ] && \
-        until curl -o /dev/null -sIf http://${1}; do \
-          sleep 1 && echo .;
-        done;
-      exit $?
- -hostname: master -coreos: - etcd2: - name: master - listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 - advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 - initial-cluster-token: k8s_etcd - listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 - initial-advertise-peer-urls: http://$private_ipv4:2380 - initial-cluster: master=http://$private_ipv4:2380 - initial-cluster-state: new - fleet: - metadata: "role=master" - units: - - name: etcd2.service - command: start - - name: generate-serviceaccount-key.service - command: start - content: | - [Unit] - Description=Generate service-account key file - - [Service] - ExecStartPre=-/usr/bin/mkdir -p /opt/bin - ExecStart=/bin/openssl genrsa -out /opt/bin/kube-serviceaccount.key 2048 2>/dev/null - RemainAfterExit=yes - Type=oneshot - - name: setup-network-environment.service - command: start - content: | - [Unit] - Description=Setup Network Environment - Documentation=https://github.com/kelseyhightower/setup-network-environment - Requires=network-online.target - After=network-online.target - - [Service] - ExecStartPre=-/usr/bin/mkdir -p /opt/bin - ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment - ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment - ExecStart=/opt/bin/setup-network-environment - RemainAfterExit=yes - Type=oneshot - - name: fleet.service - command: start - - name: flanneld.service - command: start - drop-ins: - - name: 50-network-config.conf - content: | - [Unit] - Requires=etcd2.service - [Service] - ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' - - name: docker.service - command: start - - name: kube-apiserver.service - command: start - content: | - [Unit] - Description=Kubernetes API Server - Documentation=https://github.com/kubernetes/kubernetes - Requires=setup-network-environment.service etcd2.service generate-serviceaccount-key.service - After=setup-network-environment.service etcd2.service generate-serviceaccount-key.service - - [Service] - EnvironmentFile=/etc/network-environment - ExecStartPre=-/usr/bin/mkdir -p /opt/bin - ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-apiserver -z /opt/bin/kube-apiserver https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-apiserver - ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver - ExecStartPre=/opt/bin/wupiao 127.0.0.1:2379/v2/machines - ExecStart=/opt/bin/kube-apiserver \ - --service-account-key-file=/opt/bin/kube-serviceaccount.key \ - --service-account-lookup=false \ - --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \ - --runtime-config=api/v1 \ - --allow-privileged=true \ - --insecure-bind-address=0.0.0.0 \ - --insecure-port=8080 \ - --kubelet-https=true \ - --secure-port=6443 \ - --service-cluster-ip-range=10.100.0.0/16 \ - --etcd-servers=http://127.0.0.1:2379 \ - --public-address-override=${DEFAULT_IPV4} \ - --logtostderr=true - Restart=always - RestartSec=10 - - name: kube-controller-manager.service - command: start - content: | - [Unit] - Description=Kubernetes Controller Manager - Documentation=https://github.com/kubernetes/kubernetes - Requires=kube-apiserver.service - After=kube-apiserver.service - - [Service] - ExecStartPre=/usr/bin/curl -L -o 
/opt/bin/kube-controller-manager -z /opt/bin/kube-controller-manager https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-controller-manager - ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager - ExecStart=/opt/bin/kube-controller-manager \ - --service-account-private-key-file=/opt/bin/kube-serviceaccount.key \ - --master=127.0.0.1:8080 \ - --logtostderr=true - Restart=always - RestartSec=10 - - name: kube-scheduler.service - command: start - content: | - [Unit] - Description=Kubernetes Scheduler - Documentation=https://github.com/kubernetes/kubernetes - Requires=kube-apiserver.service - After=kube-apiserver.service - - [Service] - ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-scheduler -z /opt/bin/kube-scheduler https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-scheduler - ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler - ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080 - Restart=always - RestartSec=10 - update: - group: alpha - reboot-strategy: off diff --git a/content/ko/docs/setup/custom-cloud/node.yaml b/content/ko/docs/setup/custom-cloud/node.yaml deleted file mode 100644 index 9f5caff49bc3e..0000000000000 --- a/content/ko/docs/setup/custom-cloud/node.yaml +++ /dev/null @@ -1,92 +0,0 @@ -#cloud-config -write-files: - - path: /opt/bin/wupiao - permissions: '0755' - content: | - #!/bin/bash - # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen - [ -n "$1" ] && [ -n "$2" ] && while ! curl --output /dev/null \ - --silent --head --fail \ - http://${1}:${2}; do sleep 1 && echo -n .; done; - exit $? -coreos: - etcd2: - listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 - advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 - initial-cluster: master=http://:2380 - proxy: on - fleet: - metadata: "role=node" - units: - - name: etcd2.service - command: start - - name: fleet.service - command: start - - name: flanneld.service - command: start - - name: docker.service - command: start - - name: setup-network-environment.service - command: start - content: | - [Unit] - Description=Setup Network Environment - Documentation=https://github.com/kelseyhightower/setup-network-environment - Requires=network-online.target - After=network-online.target - - [Service] - ExecStartPre=-/usr/bin/mkdir -p /opt/bin - ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment - ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment - ExecStart=/opt/bin/setup-network-environment - RemainAfterExit=yes - Type=oneshot - - name: kube-proxy.service - command: start - content: | - [Unit] - Description=Kubernetes Proxy - Documentation=https://github.com/kubernetes/kubernetes - Requires=setup-network-environment.service - After=setup-network-environment.service - - [Service] - ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-proxy -z /opt/bin/kube-proxy https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-proxy - ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy - # wait for kubernetes master to be up and ready - ExecStartPre=/opt/bin/wupiao 8080 - ExecStart=/opt/bin/kube-proxy \ - --master=:8080 \ - --logtostderr=true - Restart=always - RestartSec=10 - - name: kube-kubelet.service - command: start - content: | - [Unit] - Description=Kubernetes Kubelet - Documentation=https://github.com/kubernetes/kubernetes - 
Requires=setup-network-environment.service - After=setup-network-environment.service - - [Service] - EnvironmentFile=/etc/network-environment - ExecStartPre=/usr/bin/curl -L -o /opt/bin/kubelet -z /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kubelet - ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet - # wait for kubernetes master to be up and ready - ExecStartPre=/opt/bin/wupiao 8080 - ExecStart=/opt/bin/kubelet \ - --address=0.0.0.0 \ - --port=10250 \ - --hostname-override=${DEFAULT_IPV4} \ - --api-servers=:8080 \ - --allow-privileged=true \ - --logtostderr=true \ - --healthz-bind-address=0.0.0.0 \ - --healthz-port=10248 - Restart=always - RestartSec=10 - update: - group: alpha - reboot-strategy: off diff --git a/content/ko/docs/setup/independent/control-plane-flags.md b/content/ko/docs/setup/independent/control-plane-flags.md deleted file mode 100644 index 147b55f20f495..0000000000000 --- a/content/ko/docs/setup/independent/control-plane-flags.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Customizing control plane configuration with kubeadm -content_template: templates/concept -weight: 40 ---- - -{{% capture overview %}} - -The kubeadm configuration exposes the following fields that can override the default flags passed to control plane components such as the APIServer, ControllerManager and Scheduler: - -- `APIServerExtraArgs` -- `ControllerManagerExtraArgs` -- `SchedulerExtraArgs` - -These fields consist of `key: value` pairs. To override a flag for a control plane component: - -1. Add the appropriate field to your configuration. -2. Add the flags to override to the field. - -For more details on each field in the configuration you can navigate to our -[API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#ClusterConfiguration). - -{{% /capture %}} - -{{% capture body %}} - -## APIServer flags - -For details, see the [reference documentation for kube-apiserver](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). - -Example usage: -```yaml -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: v1.13.0 -metadata: - name: 1.13-sample -apiServer: - extraArgs: - advertise-address: 192.168.0.103 - anonymous-auth: false - enable-admission-plugins: AlwaysPullImages,DefaultStorageClass - audit-log-path: /home/johndoe/audit.log -``` - -## ControllerManager flags - -For details, see the [reference documentation for kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/). - -Example usage: -```yaml -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: v1.13.0 -metadata: - name: 1.13-sample -controllerManager: - extraArgs: - cluster-signing-key-file: /home/johndoe/keys/ca.key - bind-address: 0.0.0.0 - deployment-controller-sync-period: 50 -``` - -## Scheduler flags - -For details, see the [reference documentation for kube-scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/). 
- -Example usage: -```yaml -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -kubernetesVersion: v1.13.0 -metadata: - name: 1.13-sample -scheduler: - extraArgs: - address: 0.0.0.0 - config: /home/johndoe/schedconfig.yaml - kubeconfig: /home/johndoe/kubeconfig.yaml -``` - -{{% /capture %}} diff --git a/content/ko/docs/setup/independent/create-cluster-kubeadm.md b/content/ko/docs/setup/independent/create-cluster-kubeadm.md deleted file mode 100644 index 806bf369d1e88..0000000000000 --- a/content/ko/docs/setup/independent/create-cluster-kubeadm.md +++ /dev/null @@ -1,621 +0,0 @@ ---- -title: Creating a single master cluster with kubeadm -content_template: templates/task -weight: 30 ---- - -{{% capture overview %}} - -**kubeadm** helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. With kubeadm, your cluster should pass [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). Kubeadm also supports other cluster -lifecycle functions, such as upgrades, downgrade, and managing [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/). - -Because you can install kubeadm on various types of machine (e.g. laptop, server, -Raspberry Pi, etc.), it's well suited for integration with provisioning systems -such as Terraform or Ansible. - -kubeadm's simplicity means it can serve a wide range of use cases: - -- New users can start with kubeadm to try Kubernetes out for the first time. -- Users familiar with Kubernetes can spin up clusters with kubeadm and test their applications. -- Larger projects can include kubeadm as a building block in a more complex system that can also include other installer tools. - -kubeadm is designed to be a simple way for new users to start trying -Kubernetes out, possibly for the first time, a way for existing users to -test their application on and stitch together a cluster easily, and also to be -a building block in other ecosystem and/or installer tool with a larger -scope. - -You can install _kubeadm_ very easily on operating systems that support -installing deb or rpm packages. The responsible SIG for kubeadm, -[SIG Cluster Lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle), provides these packages pre-built for you, -but you may also build them from source for other OSes. - - -### kubeadm Maturity - -| Area | Maturity Level | -|---------------------------|--------------- | -| Command line UX | beta | -| Implementation | beta | -| Config file API | alpha | -| Self-hosting | alpha | -| kubeadm alpha subcommands | alpha | -| CoreDNS | GA | -| DynamicKubeletConfig | alpha | - - -kubeadm's overall feature state is **Beta** and will soon be graduated to -**General Availability (GA)** during 2018. Some sub-features, like self-hosting -or the configuration file API are still under active development. The -implementation of creating the cluster may change slightly as the tool evolves, -but the overall implementation should be pretty stable. Any commands under -`kubeadm alpha` are by definition, supported on an alpha level. - - -### Support timeframes - -Kubernetes releases are generally supported for nine months, and during that -period a patch release may be issued from the release branch if a severe bug or -security issue is found. Here are the latest Kubernetes releases and the support -timeframe; which also applies to `kubeadm`. 
- -| Kubernetes version | Release month | End-of-life-month | -|--------------------|----------------|-------------------| -| v1.6.x | March 2017 | December 2017 | -| v1.7.x | June 2017 | March 2018 | -| v1.8.x | September 2017 | June 2018 | -| v1.9.x | December 2017 | September 2018   | -| v1.10.x | March 2018 | December 2018   | -| v1.11.x | June 2018 | March 2019   | -| v1.12.x | September 2018 | June 2019   | - -{{% /capture %}} - -{{% capture prerequisites %}} - -- One or more machines running a deb/rpm-compatible OS, for example Ubuntu or CentOS -- 2 GB or more of RAM per machine. Any less leaves little room for your - apps. -- 2 CPUs or more on the master -- Full network connectivity among all machines in the cluster. A public or - private network is fine. - -{{% /capture %}} - -{{% capture steps %}} - -## Objectives - -* Install a single master Kubernetes cluster or [high availability cluster](https://kubernetes.io/docs/setup/independent/high-availability/) -* Install a Pod network on the cluster so that your Pods can - talk to each other - -## Instructions - -### Installing kubeadm on your hosts - -See ["Installing kubeadm"](/docs/setup/independent/install-kubeadm/). - -{{< note >}} -**Note:** If you have already installed kubeadm, run `apt-get update && -apt-get upgrade` or `yum update` to get the latest version of kubeadm. - -When you upgrade, the kubelet restarts every few seconds as it waits in a crashloop for -kubeadm to tell it what to do. This crashloop is expected and normal. -After you initialize your master, the kubelet runs normally. -{{< /note >}} - -### Initializing your master - -The master is the machine where the control plane components run, including -etcd (the cluster database) and the API server (which the kubectl CLI -communicates with). - -1. Choose a pod network add-on, and verify whether it requires any arguments to -be passed to kubeadm initialization. Depending on which -third-party provider you choose, you might need to set the `--pod-network-cidr` to -a provider-specific value. See [Installing a pod network add-on](#pod-network). -1. (Optional) Unless otherwise specified, kubeadm uses the network interface associated -with the default gateway to advertise the master's IP. To use a different -network interface, specify the `--apiserver-advertise-address=` argument -to `kubeadm init`. To deploy an IPv6 Kubernetes cluster using IPv6 addressing, you -must specify an IPv6 address, for example `--apiserver-advertise-address=fd00::101` -1. (Optional) Run `kubeadm config images pull` prior to `kubeadm init` to verify -connectivity to gcr.io registries. - -Now run: - -```bash -kubeadm init -``` - -### More information - -For more information about `kubeadm init` arguments, see the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm/). - -For a complete list of configuration options, see the [configuration file documentation](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file). - -To customize control plane components, including optional IPv6 assignment to liveness probe for control plane components and etcd server, provide extra arguments to each component as documented in [custom arguments](/docs/admin/kubeadm#custom-args). - -To run `kubeadm init` again, you must first [tear down the cluster](#tear-down). - -If you join a node with a different architecture to your cluster, create a separate -Deployment or DaemonSet for `kube-proxy` and `kube-dns` on the node. 
This is because the Docker images for these -components do not currently support multi-architecture. - -`kubeadm init` first runs a series of prechecks to ensure that the machine -is ready to run Kubernetes. These prechecks expose warnings and exit on errors. `kubeadm init` -then downloads and installs the cluster control plane components. This may take several minutes. -The output should look like: - -```none -[init] Using Kubernetes version: vX.Y.Z -[preflight] Running pre-flight checks -[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0) -[certificates] Generated ca certificate and key. -[certificates] Generated apiserver certificate and key. -[certificates] apiserver serving cert is signed for DNS names [kubeadm-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.138.0.4] -[certificates] Generated apiserver-kubelet-client certificate and key. -[certificates] Generated sa key and public key. -[certificates] Generated front-proxy-ca certificate and key. -[certificates] Generated front-proxy-client certificate and key. -[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki" -[kubeconfig] Wrote KubeConfig file to disk: "admin.conf" -[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf" -[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf" -[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf" -[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml" -[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml" -[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml" -[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml" -[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" -[init] This often takes around a minute; or longer if the control plane images have to be pulled. -[apiclient] All control plane components are healthy after 39.511972 seconds -[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace -[markmaster] Will mark node master as master by adding a label and a taint -[markmaster] Master master tainted and labelled with key/value: node-role.kubernetes.io/master="" -[bootstraptoken] Using token: -[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials -[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token -[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace -[addons] Applied essential addon: CoreDNS -[addons] Applied essential addon: kube-proxy - -Your Kubernetes master has initialized successfully! - -To start using your cluster, you need to run (as a regular user): - - mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - -You should now deploy a pod network to the cluster. 
-Run "kubectl apply -f [podnetwork].yaml" with one of the addon options listed at: - http://kubernetes.io/docs/admin/addons/ - -You can now join any number of machines by running the following on each node -as root: - - kubeadm join --token : --discovery-token-ca-cert-hash sha256: -``` - -To make kubectl work for your non-root user, run these commands, which are -also part of the `kubeadm init` output: - -```bash -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config -``` - -Alternatively, if you are the `root` user, you can run: - -```bash -export KUBECONFIG=/etc/kubernetes/admin.conf -``` - -Make a record of the `kubeadm join` command that `kubeadm init` outputs. You -need this command to [join nodes to your cluster](#join-nodes). - -The token is used for mutual authentication between the master and the joining -nodes. The token included here is secret. Keep it safe, because anyone with this -token can add authenticated nodes to your cluster. These tokens can be listed, -created, and deleted with the `kubeadm token` command. See the -[kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm-token/). - -### Installing a pod network add-on {#pod-network} - -{{< caution >}} -**Caution:** This section contains important information about installation and deployment order. Read it carefully before proceeding. -{{< /caution >}} - -You must install a pod network add-on so that your pods can communicate with -each other. - -**The network must be deployed before any applications. Also, CoreDNS will not start up before a network is installed. -kubeadm only supports Container Network Interface (CNI) based networks (and does not support kubenet).** - -Several projects provide Kubernetes pod networks using CNI, some of which also -support [Network Policy](/docs/concepts/services-networking/networkpolicies/). See the [add-ons page](/docs/concepts/cluster-administration/addons/) for a complete list of available network add-ons. -- IPv6 support was added in [CNI v0.6.0](https://github.com/containernetworking/cni/releases/tag/v0.6.0). -- [CNI bridge](https://github.com/containernetworking/plugins/blob/master/plugins/main/bridge/README.md) and [local-ipam](https://github.com/containernetworking/plugins/blob/master/plugins/ipam/host-local/README.md) are the only supported IPv6 network plugins in Kubernetes version 1.9. - -Note that kubeadm sets up a more secure cluster by default and enforces use of [RBAC](/docs/reference/access-authn-authz/rbac/). -Make sure that your network manifest supports RBAC. - -You can install a pod network add-on with the following command: - -```bash -kubectl apply -f -``` - -You can install only one pod network per cluster. - -{{< tabs name="tabs-pod-install" >}} -{{% tab name="Choose one..." %}} -Please select one of the tabs to see installation instructions for the respective third-party Pod Network Provider. -{{% /tab %}} - -{{% tab name="Calico" %}} -For more information about using Calico, see [Quickstart for Calico on Kubernetes](https://docs.projectcalico.org/latest/getting-started/kubernetes/), [Installing Calico for policy and networking](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico), and other related resources. - -For Calico to work correctly, you need to pass `--pod-network-cidr=192.168.0.0/16` to `kubeadm init` or update the `calico.yml` file to match your Pod network. Note that Calico works on `amd64` only. 
- -```shell -kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml -kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml -``` - -{{% /tab %}} -{{% tab name="Canal" %}} -Canal uses Calico for policy and Flannel for networking. Refer to the Calico documentation for the [official getting started guide](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/flannel). - -For Canal to work correctly, `--pod-network-cidr=10.244.0.0/16` has to be passed to `kubeadm init`. Note that Canal works on `amd64` only. - -```shell -kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml -kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml -``` - -{{% /tab %}} - -{{% tab name="Cilium" %}} -For more information about using Cilium with Kubernetes, see [Quickstart for Cilium on Kubernetes](http://docs.cilium.io/en/v1.2/kubernetes/quickinstall/) and [Kubernetes Install guide for Cilium](http://docs.cilium.io/en/v1.2/kubernetes/install/). - -Passing `--pod-network-cidr` option to `kubeadm init` is not required, but highly recommended. - -These commands will deploy Cilium with its own etcd managed by etcd operator. - -```shell -# Download required manifests from Cilium repository -wget https://github.com/cilium/cilium/archive/v1.2.0.zip -unzip v1.2.0.zip -cd cilium-1.2.0/examples/kubernetes/addons/etcd-operator - -# Generate and deploy etcd certificates -export CLUSTER_DOMAIN=$(kubectl get ConfigMap --namespace kube-system coredns -o yaml | awk '/kubernetes/ {print $2}') -tls/certs/gen-cert.sh $CLUSTER_DOMAIN -tls/deploy-certs.sh - -# Label kube-dns with fixed identity label -kubectl label -n kube-system pod $(kubectl -n kube-system get pods -l k8s-app=kube-dns -o jsonpath='{range .items[]}{.metadata.name}{" "}{end}') io.cilium.fixed-identity=kube-dns - -kubectl create -f ./ - -# Wait several minutes for Cilium, coredns and etcd pods to converge to a working state -``` - - -{{% /tab %}} -{{% tab name="Flannel" %}} - -For `flannel` to work correctly, you must pass `--pod-network-cidr=10.244.0.0/16` to `kubeadm init`. - -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [here](https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). - -Note that `flannel` works on `amd64`, `arm`, `arm64` and `ppc64le`. - -```shell -kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml -``` - -For more information about `flannel`, see [the CoreOS flannel repository on GitHub -](https://github.com/coreos/flannel). -{{% /tab %}} - -{{% tab name="Kube-router" %}} -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [here](https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). - -Kube-router relies on kube-controller-manager to allocate pod CIDR for the nodes. 
Therefore, use `kubeadm init` with the `--pod-network-cidr` flag. - -Kube-router provides pod networking, network policy, and high-performing IP Virtual Server(IPVS)/Linux Virtual Server(LVS) based service proxy. - -For information on setting up Kubernetes cluster with Kube-router using kubeadm, please see official [setup guide](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md). -{{% /tab %}} - -{{% tab name="Romana" %}} -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [here](https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). - -The official Romana set-up guide is [here](https://github.com/romana/romana/tree/master/containerize#using-kubeadm). - -Romana works on `amd64` only. - -```shell -kubectl apply -f https://raw.githubusercontent.com/romana/romana/master/containerize/specs/romana-kubeadm.yml -``` -{{% /tab %}} - -{{% tab name="Weave Net" %}} -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [here](https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). - -The official Weave Net set-up guide is [here](https://www.weave.works/docs/net/latest/kube-addon/). - -Weave Net works on `amd64`, `arm`, `arm64` and `ppc64le` without any extra action required. -Weave Net sets hairpin mode by default. This allows Pods to access themselves via their Service IP address -if they don't know their PodIP. - -```shell -kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" -``` -{{% /tab %}} - -{{% tab name="JuniperContrail/TungstenFabric" %}} -Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, -simultaneous overlay-underlay support, network policy enforcement, network isolation, -service chaining and flexible load balancing. - -There are multiple, flexible ways to install JuniperContrail/TungstenFabric CNI. - -Kindly refer to this quickstart: [TungstenFabric](https://tungstenfabric.github.io/website/) -{{% /tab %}} -{{< /tabs >}} - - -Once a pod network has been installed, you can confirm that it is working by -checking that the CoreDNS pod is Running in the output of `kubectl get pods --all-namespaces`. -And once the CoreDNS pod is up and running, you can continue by joining your nodes. - -If your network is not working or CoreDNS is not in the Running state, check -out our [troubleshooting docs](/docs/setup/independent/troubleshooting-kubeadm/). - -### Master Isolation - -By default, your cluster will not schedule pods on the master for security -reasons. If you want to be able to schedule pods on the master, e.g. 
for a -single-machine Kubernetes cluster for development, run: - -```bash -kubectl taint nodes --all node-role.kubernetes.io/master- -``` - -With output looking something like: - -``` -node "test-01" untainted -taint "node-role.kubernetes.io/master:" not found -taint "node-role.kubernetes.io/master:" not found -``` - -This will remove the `node-role.kubernetes.io/master` taint from any nodes that -have it, including the master node, meaning that the scheduler will then be able -to schedule pods everywhere. - -### Joining your nodes {#join-nodes} - -The nodes are where your workloads (containers and pods, etc) run. To add new nodes to your cluster do the following for each machine: - -* SSH to the machine -* Become root (e.g. `sudo su -`) -* Run the command that was output by `kubeadm init`. For example: - -``` bash -kubeadm join --token : --discovery-token-ca-cert-hash sha256: -``` - -If you do not have the token, you can get it by running the following command on the master node: - -``` bash -kubeadm token list -``` - -The output is similar to this: - -``` console -TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS -8ewj1p.9r9hcjoqgajrj4gi 23h 2018-06-12T02:51:28Z authentication, The default bootstrap system: - signing token generated by bootstrappers: - 'kubeadm init'. kubeadm: - default-node-token -``` - -By default, tokens expire after 24 hours. If you are joining a node to the cluster after the current token has expired, -you can create a new token by running the following command on the master node: - -``` bash -kubeadm token create -``` - -The output is similar to this: - -``` console -5didvk.d09sbcov8ph2amjw -``` - -If you don't have the value of `--discovery-token-ca-cert-hash`, you can get it by running the following command chain on the master node: - -``` bash -openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \ - openssl dgst -sha256 -hex | sed 's/^.* //' -``` - -The output is similar to this: - -``` console -8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78 -``` - -{{< note >}} -**Note:** To specify an IPv6 tuple for `:`, IPv6 address must be enclosed in square brackets, for example: `[fd00::101]:2073`. -{{< /note >}} - -The output should look something like: - -``` -[preflight] Running pre-flight checks - -... (log output of join workflow) ... - -Node join complete: -* Certificate signing request sent to master and response - received. -* Kubelet informed of new secure connection details. - -Run 'kubectl get nodes' on the master to see this machine join. -``` - -A few seconds later, you should notice this node in the output from `kubectl get -nodes` when run on the master. - -### (Optional) Controlling your cluster from machines other than the master - -In order to get a kubectl on some other computer (e.g. laptop) to talk to your -cluster, you need to copy the administrator kubeconfig file from your master -to your workstation like this: - -``` bash -scp root@:/etc/kubernetes/admin.conf . -kubectl --kubeconfig ./admin.conf get nodes -``` - -{{< note >}} -**Note:** The example above assumes SSH access is enabled for root. If that is not the -case, you can copy the `admin.conf` file to be accessible by some other user -and `scp` using that other user instead. - -The `admin.conf` file gives the user _superuser_ privileges over the cluster. -This file should be used sparingly. For normal users, it's recommended to -generate an unique credential to which you whitelist privileges. 
You can do -this with the `kubeadm alpha phase kubeconfig user --client-name ` -command. That command will print out a KubeConfig file to STDOUT which you -should save to a file and distribute to your user. After that, whitelist -privileges by using `kubectl create (cluster)rolebinding`. -{{< /note >}} - -### (Optional) Proxying API Server to localhost - -If you want to connect to the API Server from outside the cluster you can use -`kubectl proxy`: - -```bash -scp root@:/etc/kubernetes/admin.conf . -kubectl --kubeconfig ./admin.conf proxy -``` - -You can now access the API Server locally at `http://localhost:8001/api/v1` - -## Tear down {#tear-down} - -To undo what kubeadm did, you should first [drain the -node](/docs/reference/generated/kubectl/kubectl-commands#drain) and make -sure that the node is empty before shutting it down. - -Talking to the master with the appropriate credentials, run: - -```bash -kubectl drain --delete-local-data --force --ignore-daemonsets -kubectl delete node -``` - -Then, on the node being removed, reset all kubeadm installed state: - -```bash -kubeadm reset -``` - -If you wish to start over simply run `kubeadm init` or `kubeadm join` with the -appropriate arguments. - -More options and information about the -[`kubeadm reset command`](/docs/reference/setup-tools/kubeadm/kubeadm-reset/). - -## Maintaining a cluster {#lifecycle} - -Instructions for maintaining kubeadm clusters (e.g. upgrades,downgrades, etc.) can be found [here.](/docs/tasks/administer-cluster/kubeadm) - -## Explore other add-ons {#other-addons} - -See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to explore other add-ons, -including tools for logging, monitoring, network policy, visualization & -control of your Kubernetes cluster. - -## What's next {#whats-next} - -* Verify that your cluster is running properly with [Sonobuoy](https://github.com/heptio/sonobuoy) -* Learn about kubeadm's advanced usage in the [kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/kubeadm) -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). -* Configure log rotation. You can use **logrotate** for that. When using Docker, you can specify log rotation options for Docker daemon, for example `--log-driver=json-file --log-opt=max-size=10m --log-opt=max-file=5`. See [Configure and troubleshoot the Docker daemon](https://docs.docker.com/engine/admin/) for more details. - -## Feedback {#feedback} - -* For bugs, visit [kubeadm Github issue tracker](https://github.com/kubernetes/kubeadm/issues) -* For support, visit kubeadm Slack Channel: - [#kubeadm](https://kubernetes.slack.com/messages/kubeadm/) -* General SIG Cluster Lifecycle Development Slack Channel: - [#sig-cluster-lifecycle](https://kubernetes.slack.com/messages/sig-cluster-lifecycle/) -* SIG Cluster Lifecycle [SIG information](#TODO) -* SIG Cluster Lifecycle Mailing List: - [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) - -## Version skew policy {#version-skew-policy} - -The kubeadm CLI tool of version vX.Y may deploy clusters with a control plane of version vX.Y or vX.(Y-1). -kubeadm CLI vX.Y can also upgrade an existing kubeadm-created cluster of version vX.(Y-1). - -Due to that we can't see into the future, kubeadm CLI vX.Y may or may not be able to deploy vX.(Y+1) clusters. - -Example: kubeadm v1.8 can deploy both v1.7 and v1.8 clusters and upgrade v1.7 kubeadm-created clusters to -v1.8. 
- -Please also check our [installation guide](/docs/setup/independent/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) -for more information on the version skew between kubelets and the control plane. - -## kubeadm works on multiple platforms {#multi-platform} - -kubeadm deb/rpm packages and binaries are built for amd64, arm (32-bit), arm64, ppc64le, and s390x -following the [multi-platform -proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/multi-platform.md). - -Only some of the network providers offer solutions for all platforms. Please consult the list of -network providers above or the documentation from each provider to figure out whether the provider -supports your chosen platform. - -## Limitations {#limitations} - -Please note: kubeadm is a work in progress and these limitations will be -addressed in due course. - -1. The cluster created here has a single master, with a single etcd database - running on it. This means that if the master fails, your cluster may lose - data and may need to be recreated from scratch. Adding HA support - (multiple etcd servers, multiple API servers, etc) to kubeadm is - still a work-in-progress. - - Workaround: regularly - [back up etcd](https://coreos.com/etcd/docs/latest/admin_guide.html). The - etcd data directory configured by kubeadm is at `/var/lib/etcd` on the master. - -## Troubleshooting {#troubleshooting} - -If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/independent/troubleshooting-kubeadm/). - - - - diff --git a/content/ko/docs/setup/independent/high-availability.md b/content/ko/docs/setup/independent/high-availability.md deleted file mode 100644 index 0774a35b6d3d9..0000000000000 --- a/content/ko/docs/setup/independent/high-availability.md +++ /dev/null @@ -1,552 +0,0 @@ ---- -title: Creating Highly Available Clusters with kubeadm -content_template: templates/task -weight: 50 ---- - -{{% capture overview %}} - -This page explains two different approaches to setting up a highly available Kubernetes -cluster using kubeadm: - -- With stacked masters. This approach requires less infrastructure. etcd members -and control plane nodes are co-located. -- With an external etcd cluster. This approach requires more infrastructure. The -control plane nodes and etcd members are separated. - -Your clusters must run Kubernetes version 1.12 or later. You should also be aware that -setting up HA clusters with kubeadm is still experimental. You might encounter issues -with upgrading your clusters, for example. We encourage you to try either approach, -and provide feedback. - -{{< caution >}} -**Caution**: This page does not address running your cluster on a cloud provider. -In a cloud environment, neither approach documented here works with Service objects -of type LoadBalancer, or with dynamic PersistentVolumes. 
-{{< /caution >}} - -{{% /capture %}} - -{{% capture prerequisites %}} - -For both methods you need this infrastructure: - -- Three machines that meet [kubeadm's minimum - requirements](/docs/setup/independent/install-kubeadm/#before-you-begin) for - the masters -- Three machines that meet [kubeadm's minimum - requirements](/docs/setup/independent/install-kubeadm/#before-you-begin) for - the workers -- Full network connectivity between all machines in the cluster (public or - private network is fine) -- SSH access from one device to all nodes in the system -- sudo privileges on all machines - -For the external etcd cluster only, you also need: - -- Three additional machines for etcd members - -{{< note >}} -**Note**: The following examples run Calico as the Pod networking provider. If -you run another networking provider, make sure to replace any default values as -needed. -{{< /note >}} - -{{% /capture %}} - -{{% capture steps %}} - -## First steps for both methods - -{{< note >}} -**Note**: All commands in this guide on any control plane or etcd node should be -run as root. -{{< /note >}} - -- Find your pod CIDR. For details, see [the CNI network - documentation](/docs/setup/independent/create-cluster-kubeadm/#pod-network). - The example uses Calico, so the pod CIDR is `192.168.0.0/16`. - -### Configure SSH - -1. Enable ssh-agent on your main device that has access to all other nodes in - the system: - - ``` - eval $(ssh-agent) - ``` - -1. Add your SSH identity to the session: - - ``` - ssh-add ~/.ssh/path_to_private_key - ``` - -1. SSH between nodes to check that the connection is working correctly. - - - When you SSH to any node, make sure to add the `-A` flag: - - ``` - ssh -A 10.0.0.7 - ``` - - - When using sudo on any node, make sure to preserve the environment so SSH - forwarding works: - - ``` - sudo -E -s - ``` - -### Create load balancer for kube-apiserver - -{{< note >}} -**Note**: There are many configurations for load balancers. The following -example is only one option. Your cluster requirements may need a -different configuration. -{{< /note >}} - -1. Create a kube-apiserver load balancer with a name that resolves to DNS. - - - In a cloud environment you should place your control plane nodes behind a TCP - forwarding load balancer. This load balancer distributes traffic to all - healthy control plane nodes in its target list. The health check for - an apiserver is a TCP check on the port the kube-apiserver listens on - (default value `:6443`). - - - It is not recommended to use an IP address directly in a cloud environment. - - - The load balancer must be able to communicate with all control plane nodes - on the apiserver port. It must also allow incoming traffic on its - listening port. - -1. Add the first control plane nodes to the load balancer and test the - connection: - - ```sh - nc -v LOAD_BALANCER_IP PORT - ``` - - - A connection refused error is expected because the apiserver is not yet - running. A timeout, however, means the load balancer cannot communicate - with the control plane node. If a timeout occurs, reconfigure the load - balancer to communicate with the control plane node. - -1. Add the remaining control plane nodes to the load balancer target group. - -## Stacked control plane nodes - -### Bootstrap the first stacked control plane node - -{{< note >}} -**Note**: Optionally replace `stable` with a different version of Kubernetes, for example `v1.12.0`. -{{< /note >}} - -1. 
Create a `kubeadm-config.yaml` template file: - - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP0_IP:2379" - advertise-client-urls: "https://CP0_IP:2379" - listen-peer-urls: "https://CP0_IP:2380" - initial-advertise-peer-urls: "https://CP0_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380" - serverCertSANs: - - CP0_HOSTNAME - - CP0_IP - peerCertSANs: - - CP0_HOSTNAME - - CP0_IP - networking: - # This CIDR is a Calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate - values for your cluster: - - * `LOAD_BALANCER_DNS` - * `LOAD_BALANCER_PORT` - * `CP0_HOSTNAME` - * `CP0_IP` - -1. Run `kubeadm init --config kubeadm-config.yaml` - -### Copy required files to other control plane nodes - -The following certificates and other required files were created when you ran `kubeadm init`. -Copy these files to your other control plane nodes: - -- `/etc/kubernetes/pki/ca.crt` -- `/etc/kubernetes/pki/ca.key` -- `/etc/kubernetes/pki/sa.key` -- `/etc/kubernetes/pki/sa.pub` -- `/etc/kubernetes/pki/front-proxy-ca.crt` -- `/etc/kubernetes/pki/front-proxy-ca.key` -- `/etc/kubernetes/pki/etcd/ca.crt` -- `/etc/kubernetes/pki/etcd/ca.key` - -Copy the admin kubeconfig to the other control plane nodes: - -- `/etc/kubernetes/admin.conf` - -In the following example, replace -`CONTROL_PLANE_IPS` with the IP addresses of the other control plane nodes. - -```sh -USER=ubuntu # customizable -CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" -for host in ${CONTROL_PLANE_IPS}; do - scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt - scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key - scp /etc/kubernetes/admin.conf "${USER}"@$host: -done -``` - -{{< note >}} -**Note**: Remember that your config may differ from this example. -{{< /note >}} - -### Add the second stacked control plane node - -1. Create a second, different `kubeadm-config.yaml` template file: - - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP1_IP:2379" - advertise-client-urls: "https://CP1_IP:2379" - listen-peer-urls: "https://CP1_IP:2380" - initial-advertise-peer-urls: "https://CP1_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380,CP1_HOSTNAME=https://CP1_IP:2380" - initial-cluster-state: existing - serverCertSANs: - - CP1_HOSTNAME - - CP1_IP - peerCertSANs: - - CP1_HOSTNAME - - CP1_IP - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. 
Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `CP0_HOSTNAME` - - `CP0_IP` - - `CP1_HOSTNAME` - - `CP1_IP` - -1. Move the copied files to the correct locations: - - ```sh - USER=ubuntu # customizable - mkdir -p /etc/kubernetes/pki/etcd - mv /home/${USER}/ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/ca.key /etc/kubernetes/pki/ - mv /home/${USER}/sa.pub /etc/kubernetes/pki/ - mv /home/${USER}/sa.key /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ - mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt - mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key - mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf - ``` - -1. Run the kubeadm phase commands to bootstrap the kubelet: - - ```sh - kubeadm alpha phase certs all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml - kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml - kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml - systemctl start kubelet - ``` - -1. Run the commands to add the node to the etcd cluster: - - ```sh - export CP0_IP=10.0.0.7 - export CP0_HOSTNAME=cp0 - export CP1_IP=10.0.0.8 - export CP1_HOSTNAME=cp1 - - export KUBECONFIG=/etc/kubernetes/admin.conf - kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP1_HOSTNAME} https://${CP1_IP}:2380 - kubeadm alpha phase etcd local --config kubeadm-config.yaml - ``` - - - This command causes the etcd cluster to become unavailable for a - brief period, after the node is added to the running cluster, and before the - new node is joined to the etcd cluster. - -1. Deploy the control plane components and mark the node as a master: - - ```sh - kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml - kubeadm alpha phase controlplane all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml - kubeadm alpha phase mark-master --config kubeadm-config.yaml - ``` - -### Add the third stacked control plane node - -1. Create a third, different `kubeadm-config.yaml` template file: - - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP2_IP:2379" - advertise-client-urls: "https://CP2_IP:2379" - listen-peer-urls: "https://CP2_IP:2380" - initial-advertise-peer-urls: "https://CP2_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380,CP1_HOSTNAME=https://CP1_IP:2380,CP2_HOSTNAME=https://CP2_IP:2380" - initial-cluster-state: existing - serverCertSANs: - - CP2_HOSTNAME - - CP2_IP - peerCertSANs: - - CP2_HOSTNAME - - CP2_IP - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `CP0_HOSTNAME` - - `CP0_IP` - - `CP1_HOSTNAME` - - `CP1_IP` - - `CP2_HOSTNAME` - - `CP2_IP` - -1. 
Move the copied files to the correct locations: - - ```sh - USER=ubuntu # customizable - mkdir -p /etc/kubernetes/pki/etcd - mv /home/${USER}/ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/ca.key /etc/kubernetes/pki/ - mv /home/${USER}/sa.pub /etc/kubernetes/pki/ - mv /home/${USER}/sa.key /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ - mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt - mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key - mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf - ``` - -1. Run the kubeadm phase commands to bootstrap the kubelet: - - ```sh - kubeadm alpha phase certs all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml - kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml - kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml - systemctl start kubelet - ``` - -1. Run the commands to add the node to the etcd cluster: - - ```sh - export CP0_IP=10.0.0.7 - export CP0_HOSTNAME=cp0 - export CP2_IP=10.0.0.9 - export CP2_HOSTNAME=cp2 - - export KUBECONFIG=/etc/kubernetes/admin.conf - kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP2_HOSTNAME} https://${CP2_IP}:2380 - kubeadm alpha phase etcd local --config kubeadm-config.yaml - ``` - -1. Deploy the control plane components and mark the node as a master: - - ```sh - kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml - kubeadm alpha phase controlplane all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml - kubeadm alpha phase mark-master --config kubeadm-config.yaml - ``` - -## External etcd - -### Set up the cluster - -- Follow [these instructions](/docs/setup/independent/setup-ha-etcd-with-kubeadm/) - to set up the etcd cluster. - -#### Copy required files from an etcd node to all control plane nodes - -In the following example, replace `USER` and `CONTROL_PLANE_HOSTS` values with values -for your environment. - -```sh -# Make a list of required etcd certificate files -cat << EOF > etcd-pki-files.txt -/etc/kubernetes/pki/etcd/ca.crt -/etc/kubernetes/pki/apiserver-etcd-client.crt -/etc/kubernetes/pki/apiserver-etcd-client.key -EOF - -# create the archive -tar -czf etcd-pki.tar.gz -T etcd-pki-files.txt - -# copy the archive to the control plane nodes -USER=ubuntu -CONTROL_PLANE_HOSTS="10.0.0.7 10.0.0.8 10.0.0.9" -for host in $CONTROL_PLANE_HOSTS; do - scp etcd-pki.tar.gz "${USER}"@$host: -done -``` - -### Set up the first control plane node - -1. Extract the etcd certificates - - mkdir -p /etc/kubernetes/pki - tar -xzf etcd-pki.tar.gz -C /etc/kubernetes/pki --strip-components=3 - -1. Create a `kubeadm-config.yaml`: - -{{< note >}} -**Note**: Optionally replace `stable` with a different version of Kubernetes, for example `v1.11.3`. 
-{{< /note >}} - - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - external: - endpoints: - - https://ETCD_0_IP:2379 - - https://ETCD_1_IP:2379 - - https://ETCD_2_IP:2379 - caFile: /etc/kubernetes/pki/etcd/ca.crt - certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt - keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `ETCD_0_IP` - - `ETCD_1_IP` - - `ETCD_2_IP` - -1. Run `kubeadm init --config kubeadm-config.yaml` -1. Copy the output join commamnd. - -### Copy required files to the correct locations - -The following pki files were created during the `kubeadm init` step and must be shared with -all other control plane nodes. - -- `/etc/kubernetes/pki/ca.crt` -- `/etc/kubernetes/pki/ca.key` -- `/etc/kubernetes/pki/sa.key` -- `/etc/kubernetes/pki/sa.pub` -- `/etc/kubernetes/pki/front-proxy-ca.crt` -- `/etc/kubernetes/pki/front-proxy-ca.key` - -In the following example, replace the list of -`CONTROL_PLANE_IPS` values with the IP addresses of the other control plane nodes. - -```sh -# make a list of required kubernetes certificate files -cat << EOF > certificate_files.txt -/etc/kubernetes/pki/ca.crt -/etc/kubernetes/pki/ca.key -/etc/kubernetes/pki/sa.key -/etc/kubernetes/pki/sa.pub -/etc/kubernetes/pki/front-proxy-ca.crt -/etc/kubernetes/pki/front-proxy-ca.key -EOF - -# create the archive -tar -czf control-plane-certificates.tar.gz -T certificate_files.txt - -USER=ubuntu # customizable -CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" -for host in ${CONTROL_PLANE_IPS}; do - scp control-plane-certificates.tar.gz "${USER}"@$host: -done -``` - -### Set up the other control plane nodes - -1. Extract the required certificates - - mkdir -p /etc/kubernetes/pki - tar -xzf etcd-pki.tar.gz -C /etc/kubernetes/pki --strip-components 3 - tar -xzf control-plane-certificates.tar.gz -C /etc/kubernetes/pki --strip-components 3 - -1. Verify the location of the copied files. - Your `/etc/kubernetes` directory should look like this: - - - `/etc/kubernetes/pki/apiserver-etcd-client.crt` - - `/etc/kubernetes/pki/apiserver-etcd-client.key` - - `/etc/kubernetes/pki/ca.crt` - - `/etc/kubernetes/pki/ca.key` - - `/etc/kubernetes/pki/front-proxy-ca.crt` - - `/etc/kubernetes/pki/front-proxy-ca.key` - - `/etc/kubernetes/pki/sa.key` - - `/etc/kubernetes/pki/sa.pub` - - `/etc/kubernetes/pki/etcd/ca.crt` - -1. Run the copied `kubeadm join` command from above. Add the flag "--experimental-control-plane". - The final command will look something like this: - - kubeadm join ha.k8s.example.com:6443 --token 5ynki1.3erp9i3yo7gqg1nv --discovery-token-ca-cert-hash sha256:a00055bd8c710a9906a3d91b87ea02976334e1247936ac061d867a0f014ecd81 --experimental-control-plane - -## Common tasks after bootstrapping control plane - -### Install a pod network - -[Follow these instructions](/docs/setup/independent/create-cluster-kubeadm/#pod-network) to install -the pod network. Make sure this corresponds to whichever pod CIDR you provided -in the master configuration file. - -### Install workers - -Each worker node can now be joined to the cluster with the command returned from any of the -`kubeadm init` commands. 
- -{{% /capture %}} diff --git a/content/ko/docs/setup/independent/install-kubeadm.md b/content/ko/docs/setup/independent/install-kubeadm.md deleted file mode 100644 index 2b4413ea8d01b..0000000000000 --- a/content/ko/docs/setup/independent/install-kubeadm.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -title: Installing kubeadm -content_template: templates/task -weight: 20 ---- - -{{% capture overview %}} - -This page shows how to install the `kubeadm` toolbox. -For information how to create a cluster with kubeadm once you have performed this installation process, -see the [Using kubeadm to Create a Cluster](/docs/setup/independent/create-cluster-kubeadm/) page. - -{{% /capture %}} - -{{% capture prerequisites %}} - -* One or more machines running one of: - - Ubuntu 16.04+ - - Debian 9 - - CentOS 7 - - RHEL 7 - - Fedora 25/26 (best-effort) - - HypriotOS v1.0.1+ - - Container Linux (tested with 1800.6.0) -* 2 GB or more of RAM per machine (any less will leave little room for your apps) -* 2 CPUs or more -* Full network connectivity between all machines in the cluster (public or private network is fine) -* Unique hostname, MAC address, and product_uuid for every node. See [here](#verify-the-mac-address-and-product-uuid-are-unique-for-every-node) for more details. -* Certain ports are open on your machines. See [here](#check-required-ports) for more details. -* Swap disabled. You **MUST** disable swap in order for the kubelet to work properly. - -{{% /capture %}} - -{{% capture steps %}} - -## Verify the MAC address and product_uuid are unique for every node - -* You can get the MAC address of the network interfaces using the command `ip link` or `ifconfig -a` -* The product_uuid can be checked by using the command `sudo cat /sys/class/dmi/id/product_uuid` - -It is very likely that hardware devices will have unique addresses, although some virtual machines may have -identical values. Kubernetes uses these values to uniquely identify the nodes in the cluster. -If these values are not unique to each node, the installation process -may [fail](https://github.com/kubernetes/kubeadm/issues/31). - -## Check network adapters - -If you have more than one network adapter, and your Kubernetes components are not reachable on the default -route, we recommend you add IP route(s) so Kubernetes cluster addresses go via the appropriate adapter. - -## Check required ports - -### Master node(s) - -| Protocol | Direction | Port Range | Purpose | Used By | -|----------|-----------|------------|-------------------------|---------------------------| -| TCP | Inbound | 6443* | Kubernetes API server | All | -| TCP | Inbound | 2379-2380 | etcd server client API | kube-apiserver, etcd | -| TCP | Inbound | 10250 | Kubelet API | Self, Control plane | -| TCP | Inbound | 10251 | kube-scheduler | Self | -| TCP | Inbound | 10252 | kube-controller-manager | Self | - -### Worker node(s) - -| Protocol | Direction | Port Range | Purpose | Used By | -|----------|-----------|-------------|-----------------------|-------------------------| -| TCP | Inbound | 10250 | Kubelet API | Self, Control plane | -| TCP | Inbound | 30000-32767 | NodePort Services** | All | - -** Default port range for [NodePort Services](/docs/concepts/services-networking/service/). - -Any port numbers marked with * are overridable, so you will need to ensure any -custom ports you provide are also open. - -Although etcd ports are included in master nodes, you can also host your own -etcd cluster externally or on custom ports. 
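
Rather than checking firewall rules one by one, the port table above can be scripted. The sketch below assumes `nc` (netcat) is installed and uses a placeholder address (`CP_IP`); it only proves reachability once the corresponding components are actually listening, so run it against an existing control plane node or after `kubeadm init`:

```sh
# Values that must be unique per node (compare the output across all machines).
ip link show | awk '/link\/ether/ {print $2}'
sudo cat /sys/class/dmi/id/product_uuid

# Reachability of the documented control plane ports; CP_IP is a placeholder.
CP_IP=10.0.0.7
for port in 6443 2379 2380 10250 10251 10252; do
  if nc -z -w 2 "${CP_IP}" "${port}"; then
    echo "port ${port}: reachable"
  else
    echo "port ${port}: blocked or not listening"
  fi
done
```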
-
-The pod network plugin you use (see below) may also require certain ports to be
-open. Since this differs with each pod network plugin, please see the
-documentation for the plugins about what port(s) those need.
-
-## Installing runtime
-
-Since v1.6.0, Kubernetes has enabled the use of CRI, Container Runtime Interface, by default.
-The container runtime used by default is Docker, which is enabled through the built-in
-`dockershim` CRI implementation inside of the `kubelet`.
-
-Other CRI-based runtimes include:
-
-- [containerd](https://github.com/containerd/cri) (CRI plugin built into containerd)
-- [cri-o](https://github.com/kubernetes-incubator/cri-o)
-- [frakti](https://github.com/kubernetes/frakti)
-- [rkt](https://github.com/kubernetes-incubator/rktlet)
-
-Refer to the [CRI installation instructions](/docs/setup/cri) for more information.
-
-## Installing kubeadm, kubelet and kubectl
-
-You will install these packages on all of your machines:
-
-* `kubeadm`: the command to bootstrap the cluster.
-
-* `kubelet`: the component that runs on all of the machines in your cluster
-    and does things like starting pods and containers.
-
-* `kubectl`: the command line util to talk to your cluster.
-
-kubeadm **will not** install or manage `kubelet` or `kubectl` for you, so you will
-need to ensure they match the version of the Kubernetes control plane you want
-kubeadm to install for you. If you do not, there is a risk of a version skew occurring that
-can lead to unexpected, buggy behaviour. However, _one_ minor version skew between the
-kubelet and the control plane is supported, but the kubelet version may never exceed the API
-server version. For example, kubelets running 1.7.0 should be fully compatible with a 1.8.0 API server,
-but not vice versa.
-
-{{< warning >}}
-These instructions exclude all Kubernetes packages from any system upgrades.
-This is because kubeadm and Kubernetes require
-[special attention to upgrade](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11/).
-{{< /warning >}}
-
-For more information on version skews, please read our
-[version skew policy](/docs/setup/independent/create-cluster-kubeadm/#version-skew-policy).
-
-{{< tabs name="k8s_install" >}}
-{{% tab name="Ubuntu, Debian or HypriotOS" %}}
-```bash
-apt-get update && apt-get install -y apt-transport-https curl
-curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
-cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
-deb https://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-apt-get update
-apt-get install -y kubelet kubeadm kubectl
-apt-mark hold kubelet kubeadm kubectl
-```
-{{% /tab %}}
-{{% tab name="CentOS, RHEL or Fedora" %}}
-```bash
-cat <<EOF > /etc/yum.repos.d/kubernetes.repo
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-exclude=kube*
-EOF
-
-# Set SELinux in permissive mode (effectively disabling it)
-setenforce 0
-sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
-
-yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
-
-systemctl enable kubelet && systemctl start kubelet
-```
-
-  **Note:**
-
-  - Setting SELinux in permissive mode by running `setenforce 0` and `sed ...` effectively disables it.
-    This is required to allow containers to access the host filesystem, which is needed by pod networks for example.
-    You have to do this until SELinux support is improved in the kubelet.
-  - Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure
-    `net.bridge.bridge-nf-call-iptables` is set to 1 in your `sysctl` config, e.g.
-
-    ```bash
-    cat <<EOF > /etc/sysctl.d/k8s.conf
-    net.bridge.bridge-nf-call-ip6tables = 1
-    net.bridge.bridge-nf-call-iptables = 1
-    EOF
-    sysctl --system
-    ```
-{{% /tab %}}
-{{% tab name="Container Linux" %}}
-Install CNI plugins (required for most pod networks):
-
-```bash
-CNI_VERSION="v0.6.0"
-mkdir -p /opt/cni/bin
-curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
-```
-
-Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI))
-
-```bash
-CRICTL_VERSION="v1.11.1"
-mkdir -p /opt/bin
-curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
-```
-
-Install `kubeadm`, `kubelet`, `kubectl` and add a `kubelet` systemd service:
-
-```bash
-RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"
-
-mkdir -p /opt/bin
-cd /opt/bin
-curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
-chmod +x {kubeadm,kubelet,kubectl}
-
-curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
-mkdir -p /etc/systemd/system/kubelet.service.d
-curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-```
-
-Enable and start `kubelet`:
-
-```bash
-systemctl enable kubelet && systemctl start kubelet
-```
-{{% /tab %}}
-{{< /tabs >}}
-
-
-The kubelet is now restarting every few seconds, as it waits in a crashloop for
-kubeadm to tell it what to do.
-
-## Configure cgroup driver used by kubelet on Master Node
-
-When using Docker, kubeadm will automatically detect the cgroup driver for the kubelet
-and set it in the `/var/lib/kubelet/kubeadm-flags.env` file during runtime.
-
-If you are using a different CRI, you have to modify the file
-`/etc/default/kubelet` with your `cgroup-driver` value, like so:
-
-```bash
-KUBELET_KUBEADM_EXTRA_ARGS=--cgroup-driver=<value>
-```
-
-This file will be used by `kubeadm init` and `kubeadm join` to source extra
-user defined arguments for the kubelet.
-
-Please note that you **only** have to do that if the cgroup driver of your CRI
-is not `cgroupfs`, because that is the default value in the kubelet already.
-
-Restarting the kubelet is required:
-
-```bash
-systemctl daemon-reload
-systemctl restart kubelet
-```
-
-## Troubleshooting
-
-If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/independent/troubleshooting-kubeadm/).
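
Before moving on to `kubeadm init`, a short post-install sanity check catches most of the issues described above. This is only a sketch — flag spellings such as `kubectl version --client --short` and `kubeadm version -o short` match the releases current at the time of writing and may differ in other versions:

```sh
# The three binaries should report the same version, or a supported skew.
kubeadm version -o short
kubelet --version
kubectl version --client --short

# Swap must stay disabled for the kubelet; no output from swapon means it is off.
swapon --show
sudo swapoff -a

# Bridged traffic must be visible to iptables (see the sysctl note above).
sysctl net.bridge.bridge-nf-call-iptables

# The kubelet is expected to crash-loop here until kubeadm init/join configures it.
systemctl status kubelet --no-pager
```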
- -{{% capture whatsnext %}} - -* [Using kubeadm to Create a Cluster](/docs/setup/independent/create-cluster-kubeadm/) - -{{% /capture %}} diff --git a/content/ko/docs/setup/independent/setup-ha-etcd-with-kubeadm.md b/content/ko/docs/setup/independent/setup-ha-etcd-with-kubeadm.md deleted file mode 100644 index 08d547cfbdfa1..0000000000000 --- a/content/ko/docs/setup/independent/setup-ha-etcd-with-kubeadm.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Set up a High Availability etcd cluster with kubeadm -content_template: templates/task -weight: 60 ---- - -{{% capture overview %}} - -Kubeadm defaults to running a single member etcd cluster in a static pod managed -by the kubelet on the control plane node. This is not a high availability setup -as the etcd cluster contains only one member and cannot sustain any members -becoming unavailable. This task walks through the process of creating a high -availability etcd cluster of three members that can be used as an external etcd -when using kubeadm to set up a kubernetes cluster. - -{{% /capture %}} - -{{% capture prerequisites %}} - -* Three hosts that can talk to each other over ports 2379 and 2380. This - document assumes these default ports. However, they are configurable through - the kubeadm config file. -* Each host must [have docker, kubelet, and kubeadm installed][toolbox]. -* Some infrastructure to copy files between hosts. For example `ssh` and `scp` - can satisfy this requirement. - -[toolbox]: /docs/setup/independent/install-kubeadm/ - -{{% /capture %}} - -{{% capture steps %}} - -## Setting up the cluster - -The general approach is to generate all certs on one node and only distribute -the *necessary* files to the other nodes. - -{{< note >}} -**Note:** kubeadm contains all the necessary crytographic machinery to generate -the certificates described below; no other cryptographic tooling is required for -this example. -{{< /note >}} - - -1. Configure the kubelet to be a service manager for etcd. - - Running etcd is simpler than running kubernetes so you must override the - kubeadm-provided kubelet unit file by creating a new one with a higher - precedence. - - ```sh - cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf - [Service] - ExecStart= - ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true - Restart=always - EOF - - systemctl daemon-reload - systemctl restart kubelet - ``` - -1. Create configuration files for kubeadm. - - Generate one kubeadm configuration file for each host that will have an etcd - member running on it using the following script. - - ```sh - # Update HOST0, HOST1, and HOST2 with the IPs or resolvable names of your hosts - export HOST0=10.0.0.6 - export HOST1=10.0.0.7 - export HOST2=10.0.0.8 - - # Create temp directories to store files that will end up on other hosts. 
- mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/ - - ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2}) - NAMES=("infra0" "infra1" "infra2") - - for i in "${!ETCDHOSTS[@]}"; do - HOST=${ETCDHOSTS[$i]} - NAME=${NAMES[$i]} - cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml - apiVersion: "kubeadm.k8s.io/v1beta1" - kind: ClusterConfiguration - etcd: - local: - serverCertSANs: - - "${HOST}" - peerCertSANs: - - "${HOST}" - extraArgs: - initial-cluster: infra0=https://${ETCDHOSTS[0]}:2380,infra1=https://${ETCDHOSTS[1]}:2380,infra2=https://${ETCDHOSTS[2]}:2380 - initial-cluster-state: new - name: ${NAME} - listen-peer-urls: https://${HOST}:2380 - listen-client-urls: https://${HOST}:2379 - advertise-client-urls: https://${HOST}:2379 - initial-advertise-peer-urls: https://${HOST}:2380 - EOF - done - ``` - -1. Generate the certificate authority - - If you already have a CA then the only action that is copying the CA's `crt` and - `key` file to `/etc/kubernetes/pki/etcd/ca.crt` and - `/etc/kubernetes/pki/etcd/ca.key`. After those files have been copied, - proceed to the next step, "Create certificates for each member". - - If you do not already have a CA then run this command on `$HOST0` (where you - generated the configuration files for kubeadm). - - ``` - kubeadm alpha phase certs etcd-ca - ``` - - This creates two files - - - `/etc/kubernetes/pki/etcd/ca.crt` - - `/etc/kubernetes/pki/etcd/ca.key` - -1. Create certificates for each member - - ```sh - kubeadm alpha phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm alpha phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST2}/ - # cleanup non-reusable certificates - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm alpha phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm alpha phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST1}/ - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm alpha phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm alpha phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm alpha phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - # No need to move the certs because they are for HOST0 - - # clean up certs that should not be copied off this host - find /tmp/${HOST2} -name ca.key -type f -delete - find /tmp/${HOST1} -name ca.key -type f -delete - ``` - -1. Copy certificates and kubeadm configs - - The certificates have been generated and now they must be moved to their - respective hosts. - - ```sh - USER=ubuntu - HOST=${HOST1} - scp -r /tmp/${HOST}/* ${USER}@${HOST}: - ssh ${USER}@${HOST} - USER@HOST $ sudo -Es - root@HOST $ chown -R root:root pki - root@HOST $ mv pki /etc/kubernetes/ - ``` - -1. 
Ensure all expected files exist - - The complete list of required files on `$HOST0` is: - - ``` - /tmp/${HOST0} - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── ca.key - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - On `$HOST1`: - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - On `$HOST2` - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - -1. Create the static pod manifests - - Now that the certificates and configs are in place it's time to create the - manifests. On each host run the `kubeadm` command to generate a static manifest - for etcd. - - ```sh - root@HOST0 $ kubeadm alpha phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml - root@HOST1 $ kubeadm alpha phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - root@HOST2 $ kubeadm alpha phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - ``` - -1. Optional: Check the cluster health - - ```sh - docker run --rm -it \ - --net host \ - -v /etc/kubernetes:/etc/kubernetes quay.io/coreos/etcd:v3.2.18 etcdctl \ - --cert-file /etc/kubernetes/pki/etcd/peer.crt \ - --key-file /etc/kubernetes/pki/etcd/peer.key \ - --ca-file /etc/kubernetes/pki/etcd/ca.crt \ - --endpoints https://${HOST0}:2379 cluster-health - ... - cluster is healthy - ``` - -{{% /capture %}} - -{{% capture whatsnext %}} - -Once your have a working 3 member etcd cluster, you can continue setting up a -highly available control plane using the [external etcd method with -kubeadm](/docs/setup/independent/high-availability/). - -{{% /capture %}} - - diff --git a/content/ko/docs/setup/independent/troubleshooting-kubeadm.md b/content/ko/docs/setup/independent/troubleshooting-kubeadm.md deleted file mode 100644 index f1e9704ab561f..0000000000000 --- a/content/ko/docs/setup/independent/troubleshooting-kubeadm.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: Troubleshooting kubeadm -content_template: templates/concept -weight: 70 ---- - -{{% capture overview %}} - -As with any program, you might run into an error installing or running kubeadm. -This page lists some common failure scenarios and have provided steps that can help you understand and fix the problem. - -If your problem is not listed below, please follow the following steps: - -- If you think your problem is a bug with kubeadm: - - Go to [github.com/kubernetes/kubeadm](https://github.com/kubernetes/kubeadm/issues) and search for existing issues. - - If no issue exists, please [open one](https://github.com/kubernetes/kubeadm/issues/new) and follow the issue template. - -- If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include - relevant tags like `#kubernetes` and `#kubeadm` so folks can help you. 
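
The "Ensure all expected files exist" step above is easy to get wrong when copying archives by hand. A small check script — a sketch that simply mirrors the file listings above, nothing kubeadm-specific — can be run on each etcd host before generating the static pod manifests:

```sh
# Paths taken from the expected file listings above; adjust if your layout differs.
REQUIRED_FILES="
/etc/kubernetes/pki/apiserver-etcd-client.crt
/etc/kubernetes/pki/apiserver-etcd-client.key
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/healthcheck-client.crt
/etc/kubernetes/pki/etcd/healthcheck-client.key
/etc/kubernetes/pki/etcd/peer.crt
/etc/kubernetes/pki/etcd/peer.key
/etc/kubernetes/pki/etcd/server.crt
/etc/kubernetes/pki/etcd/server.key
"

missing=0
for f in ${REQUIRED_FILES}; do
  [ -f "${f}" ] || { echo "missing: ${f}"; missing=1; }
done
# ca.key is additionally expected, but only on the host where the CA was generated ($HOST0).
[ "${missing}" -eq 0 ] && echo "all expected etcd PKI files are present"
```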
- -{{% /capture %}} - -{{% capture body %}} - -## `ebtables` or some similar executable not found during installation - -If you see the following warnings while running `kubeadm init` - -```sh -[preflight] WARNING: ebtables not found in system path -[preflight] WARNING: ethtool not found in system path -``` - -Then you may be missing `ebtables`, `ethtool` or a similar executable on your node. You can install them with the following commands: - -- For Ubuntu/Debian users, run `apt install ebtables ethtool`. -- For CentOS/Fedora users, run `yum install ebtables ethtool`. - -## kubeadm blocks waiting for control plane during installation - -If you notice that `kubeadm init` hangs after printing out the following line: - -```sh -[apiclient] Created API client, waiting for the control plane to become ready -``` - -This may be caused by a number of problems. The most common are: - -- network connection problems. Check that your machine has full network connectivity before continuing. -- the default cgroup driver configuration for the kubelet differs from that used by Docker. - Check the system log file (e.g. `/var/log/message`) or examine the output from `journalctl -u kubelet`. If you see something like the following: - - ```shell - error: failed to run Kubelet: failed to create kubelet: - misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs" - ``` - - There are two common ways to fix the cgroup driver problem: - - 1. Install Docker again following instructions - [here](/docs/setup/independent/install-kubeadm/#installing-docker). - 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to - [Configure cgroup driver used by kubelet on Master Node](/docs/setup/independent/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node) - for detailed instructions. - -- control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. - -## kubeadm blocks when removing managed containers - -The following could happen if Docker halts and does not remove any Kubernetes-managed containers: - -```bash -sudo kubeadm reset -[preflight] Running pre-flight checks -[reset] Stopping the kubelet service -[reset] Unmounting mounted directories in "/var/lib/kubelet" -[reset] Removing kubernetes-managed containers -(block) -``` - -A possible solution is to restart the Docker service and then re-run `kubeadm reset`: - -```bash -sudo systemctl restart docker.service -sudo kubeadm reset -``` - -Inspecting the logs for docker may also be useful: - -```sh -journalctl -ul docker -``` - -## Pods in `RunContainerError`, `CrashLoopBackOff` or `Error` state - -Right after `kubeadm init` there should not be any pods in these states. - -- If there are pods in one of these states _right after_ `kubeadm init`, please open an - issue in the kubeadm repo. `coredns` (or `kube-dns`) should be in the `Pending` state - until you have deployed the network solution. -- If you see Pods in the `RunContainerError`, `CrashLoopBackOff` or `Error` state - after deploying the network solution and nothing happens to `coredns` (or `kube-dns`), - it's very likely that the Pod Network solution and nothing happens to the DNS server, it's very - likely that the Pod Network solution that you installed is somehow broken. You - might have to grant it more RBAC privileges or use a newer version. 
Please file - an issue in the Pod Network providers' issue tracker and get the issue triaged there. -- If you install a version of Docker older than 1.12.1, remove the `MountFlags=slave` option - when booting `dockerd` with `systemd` and restart `docker`. You can see the MountFlags in `/usr/lib/systemd/system/docker.service`. - MountFlags can interfere with volumes mounted by Kubernetes, and put the Pods in `CrashLoopBackOff` state. - The error happens when Kubernetes does not find `var/run/secrets/kubernetes.io/serviceaccount` files. - -## `coredns` (or `kube-dns`) is stuck in the `Pending` state - -This is **expected** and part of the design. kubeadm is network provider-agnostic, so the admin -should [install the pod network solution](/docs/concepts/cluster-administration/addons/) -of choice. You have to install a Pod Network -before CoreDNS may deployed fully. Hence the `Pending` state before the network is set up. - -## `HostPort` services do not work - -The `HostPort` and `HostIP` functionality is available depending on your Pod Network -provider. Please contact the author of the Pod Network solution to find out whether -`HostPort` and `HostIP` functionality are available. - -Calico, Canal, and Flannel CNI providers are verified to support HostPort. - -For more information, see the [CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md). - -If your network provider does not support the portmap CNI plugin, you may need to use the [NodePort feature of -services](/docs/concepts/services-networking/service/#nodeport) or use `HostNetwork=true`. - -## Pods are not accessible via their Service IP - -- Many network add-ons do not yet enable [hairpin mode](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-service/#a-pod-cannot-reach-itself-via-service-ip) - which allows pods to access themselves via their Service IP. This is an issue related to - [CNI](https://github.com/containernetworking/cni/issues/476). Please contact the network - add-on provider to get the latest status of their support for hairpin mode. - -- If you are using VirtualBox (directly or via Vagrant), you will need to - ensure that `hostname -i` returns a routable IP address. By default the first - interface is connected to a non-routable host-only network. A work around - is to modify `/etc/hosts`, see this [Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11) - for an example. - -## TLS certificate errors - -The following error indicates a possible certificate mismatch. - -```none -# kubectl get pods -Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes") -``` - -- Verify that the `$HOME/.kube/config` file contains a valid certificate, and - regenerate a certificate if necessary. The certificates in a kubeconfig file - are base64 encoded. The `base64 -d` command can be used to decode the certificate - and `openssl x509 -text -noout` can be used for viewing the certificate information. 
-- Another workaround is to overwrite the existing `kubeconfig` for the "admin" user: - - ```sh - mv $HOME/.kube $HOME/.kube.bak - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - ``` - -## Default NIC When using flannel as the pod network in Vagrant - -The following error might indicate that something was wrong in the pod network: - -```sh -Error from server (NotFound): the server could not find the requested resource -``` - -- If you're using flannel as the pod network inside Vagrant, then you will have to specify the default interface name for flannel. - - Vagrant typically assigns two interfaces to all VMs. The first, for which all hosts are assigned the IP address `10.0.2.15`, is for external traffic that gets NATed. - - This may lead to problems with flannel, which defaults to the first interface on a host. This leads to all hosts thinking they have the same public IP address. To prevent this, pass the `--iface eth1` flag to flannel so that the second interface is chosen. - -## Non-public IP used for containers - -In some situations `kubectl logs` and `kubectl run` commands may return with the following errors in an otherwise functional cluster: - -```sh -Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc65b868-glc5m/mysql: dial tcp 10.19.0.41:10250: getsockopt: no route to host -``` - -- This may be due to Kubernetes using an IP that can not communicate with other IPs on the seemingly same subnet, possibly by policy of the machine provider. -- Digital Ocean assigns a public IP to `eth0` as well as a private one to be used internally as anchor for their floating IP feature, yet `kubelet` will pick the latter as the node's `InternalIP` instead of the public one. - - Use `ip addr show` to check for this scenario instead of `ifconfig` because `ifconfig` will not display the offending alias IP address. Alternatively an API endpoint specific to Digital Ocean allows to query for the anchor IP from the droplet: - - ```sh - curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address - ``` - - The workaround is to tell `kubelet` which IP to use using `--node-ip`. When using Digital Ocean, it can be the public one (assigned to `eth0`) or the private one (assigned to `eth1`) should you want to use the optional private network. The [`KubeletExtraArgs` section of the kubeadm `NodeRegistrationOptions` structure](https://github.com/kubernetes/kubernetes/blob/release-1.12/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go) can be used for this. - - Then restart `kubelet`: - - ```sh - systemctl daemon-reload - systemctl restart kubelet - ``` - -## Services with externalTrafficPolicy=Local are not reachable - -On nodes where the hostname for the kubelet is overridden using the `--hostname-override` option, kube-proxy will default to treating 127.0.0.1 as the node IP, which results in rejecting connections for Services configured for `externalTrafficPolicy=Local`. 
This situation can be verified by checking the output of `kubectl -n kube-system logs `: - -```sh -W0507 22:33:10.372369 1 server.go:586] Failed to retrieve node info: nodes "ip-10-0-23-78" not found -W0507 22:33:10.372474 1 proxier.go:463] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP -``` - -A workaround for this is to modify the kube-proxy DaemonSet in the following way: - -```sh -kubectl -n kube-system patch --type json daemonset kube-proxy -p "$(cat <<'EOF' -[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/env", - "value": [ - { - "name": "NODE_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "spec.nodeName" - } - } - } - ] - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/command/-", - "value": "--hostname-override=${NODE_NAME}" - } -] -EOF -)" - -``` - -## `coredns` pods have `CrashLoopBackOff` or `Error` state - -If you have nodes that are running SELinux with an older version of Docker you might experience a scenario -where the `coredns` pods are not starting. To solve that you can try one of the following options: - -- Upgrade to a [newer version of Docker](/docs/setup/independent/install-kubeadm/#installing-docker). -- [Disable SELinux](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security-enhanced_linux/sect-security-enhanced_linux-enabling_and_disabling_selinux-disabling_selinux). -- Modify the `coredns` deployment to set `allowPrivilegeEscalation` to `true`: - -```bash -kubectl -n kube-system get deployment coredns -o yaml | \ - sed 's/allowPrivilegeEscalation: false/allowPrivilegeEscalation: true/g' | \ - kubectl apply -f - -``` - -{{< warning >}} -**Warning**: Disabling SELinux or setting `allowPrivilegeEscalation` to `true` can compromise -the security of your cluster. -{{< /warning >}} - -{{% /capture %}} diff --git a/content/ko/docs/setup/pick-right-solution.md b/content/ko/docs/setup/pick-right-solution.md index cb6e294417fd7..0ff7c2c136593 100644 --- a/content/ko/docs/setup/pick-right-solution.md +++ b/content/ko/docs/setup/pick-right-solution.md @@ -217,7 +217,7 @@ Digital Rebar | kubeadm | any | metal | [docs](/docs/setup/ {{< note >}} -**참고:** 위의 표는 버전 테스트/사용된 노드의 지원 레벨을 기준으로 정렬된다. +위의 표는 버전 테스트/사용된 노드의 지원 레벨을 기준으로 정렬된다. {{< /note >}} ### 열 정의
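
For the kube-proxy and `coredns` fixes in the troubleshooting section above, it is worth confirming that the patched workloads actually rolled out. A rough sketch — the `k8s-app=kube-proxy` and `k8s-app=kube-dns` label selectors are the kubeadm defaults of this era and may differ in your cluster:

```sh
# Wait for the patched kube-proxy DaemonSet to roll out, then spot-check its logs;
# the "invalid nodeIP" warning should no longer appear.
kubectl -n kube-system rollout status daemonset kube-proxy
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=20

# coredns pods should leave CrashLoopBackOff once the deployment change is applied.
kubectl -n kube-system rollout status deployment coredns
kubectl -n kube-system get pods -l k8s-app=kube-dns -o wide
```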