From 55591a374bb90e800ca14842a87d93cdda67aaa6 Mon Sep 17 00:00:00 2001 From: Rajadeepan Date: Sun, 31 Mar 2019 16:35:12 +0000 Subject: [PATCH 1/4] Kubemark changes --- Gopkg.lock | 1 + Gopkg.toml | 2 + test/kubemark/kube-batch.yaml | 43 + test/kubemark/start-kubemark.sh | 55 + test/kubemark/stop-kubemark.sh | 7 + vendor/k8s.io/kubernetes/cluster/BUILD | 62 + vendor/k8s.io/kubernetes/cluster/OWNERS | 19 + vendor/k8s.io/kubernetes/cluster/clientbin.sh | 113 + vendor/k8s.io/kubernetes/cluster/common.sh | 507 +++ vendor/k8s.io/kubernetes/cluster/gce/BUILD | 23 + vendor/k8s.io/kubernetes/cluster/gce/OWNERS | 20 + .../kubernetes/cluster/gce/config-common.sh | 151 + .../kubernetes/cluster/gce/config-default.sh | 488 +++ .../k8s.io/kubernetes/cluster/gce/gci/BUILD | 71 + .../k8s.io/kubernetes/cluster/gce/gci/OWNERS | 6 + .../kubernetes/cluster/gce/gci/helper.sh | 32 + .../cluster/gce/gci/master-helper.sh | 177 + .../kubernetes/cluster/gce/gci/node-helper.sh | 45 + vendor/k8s.io/kubernetes/cluster/gce/util.sh | 3521 +++++++++++++++++ .../cluster/gce/windows/node-helper.sh | 53 + .../kubernetes/cluster/images/kubemark/BUILD | 36 + .../cluster/images/kubemark/Dockerfile | 17 + .../cluster/images/kubemark/Makefile | 36 + .../kubernetes/cluster/images/kubemark/OWNERS | 10 + vendor/k8s.io/kubernetes/cluster/kube-util.sh | 41 + vendor/k8s.io/kubernetes/cluster/kubectl.sh | 77 + .../k8s.io/kubernetes/cluster/kubemark/OWNERS | 10 + .../cluster/kubemark/gce/config-default.sh | 143 + .../kubernetes/cluster/kubemark/util.sh | 20 + .../kubernetes/cluster/skeleton/util.sh | 79 + .../k8s.io/kubernetes/cluster/windows/BUILD | 35 + .../k8s.io/kubernetes/cluster/windows/OWNERS | 2 + .../kubernetes/cluster/windows/node-helper.sh | 53 + vendor/k8s.io/kubernetes/hack/lib/BUILD | 28 + vendor/k8s.io/kubernetes/hack/lib/etcd.sh | 138 + vendor/k8s.io/kubernetes/hack/lib/golang.sh | 770 ++++ vendor/k8s.io/kubernetes/hack/lib/init.sh | 187 + vendor/k8s.io/kubernetes/hack/lib/logging.sh | 171 + vendor/k8s.io/kubernetes/hack/lib/util.sh | 839 ++++ vendor/k8s.io/kubernetes/hack/lib/version.sh | 175 + vendor/k8s.io/kubernetes/test/kubemark/BUILD | 14 + vendor/k8s.io/kubernetes/test/kubemark/OWNERS | 10 + .../test/kubemark/cloud-provider-config.sh | 21 + .../kubernetes/test/kubemark/common/util.sh | 49 + .../test/kubemark/configure-kubectl.sh | 20 + .../kubernetes/test/kubemark/gce/util.sh | 146 + .../kubernetes/test/kubemark/iks/shutdown.sh | 44 + .../kubernetes/test/kubemark/iks/startup.sh | 316 ++ .../kubernetes/test/kubemark/iks/util.sh | 222 ++ .../test/kubemark/master-log-dump.sh | 26 + .../test/kubemark/pre-existing/README.md | 54 + .../test/kubemark/pre-existing/util.sh | 27 + .../kubemark/resources/addons/heapster.json | 82 + .../kubemark/resources/addons/kube_dns.yaml | 188 + .../cluster-autoscaler_template.json | 94 + .../kubemark/resources/heapster_template.json | 82 + .../test/kubemark/resources/hollow-node.yaml | 132 + .../resources/hollow-node_template.yaml | 132 + .../kubemark/resources/kernel-monitor.json | 20 + .../kubemark/resources/kube_dns_template.yaml | 188 + .../kubemark/resources/kubemark-master-env.sh | 31 + .../test/kubemark/resources/kubemark-ns.json | 7 + .../addons/kubemark-rbac-bindings/README.md | 1 + .../cluster-autoscaler-binding.yaml | 16 + .../heapster-binding.yaml | 15 + .../kube-dns-binding.yaml | 15 + .../kubecfg-binding.yaml | 18 + .../kubelet-binding.yaml | 18 + .../kubemark-rbac-bindings/npd-binding.yaml | 15 + .../resources/manifests/etcd-events.yaml | 51 + 
.../kubemark/resources/manifests/etcd.yaml | 50 + .../manifests/kube-addon-manager.yaml | 34 + .../resources/manifests/kube-apiserver.yaml | 70 + .../manifests/kube-controller-manager.yaml | 54 + .../resources/manifests/kube-scheduler.yaml | 43 + .../resources/start-kubemark-master.sh | 738 ++++ .../kubernetes/test/kubemark/run-e2e-tests.sh | 54 + .../kubernetes/test/kubemark/skeleton/util.sh | 93 + .../test/kubemark/start-kubemark.sh | 504 +++ .../kubernetes/test/kubemark/stop-kubemark.sh | 47 + 80 files changed, 12004 insertions(+) create mode 100755 test/kubemark/kube-batch.yaml create mode 100755 test/kubemark/start-kubemark.sh create mode 100755 test/kubemark/stop-kubemark.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/BUILD create mode 100644 vendor/k8s.io/kubernetes/cluster/OWNERS create mode 100755 vendor/k8s.io/kubernetes/cluster/clientbin.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/common.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/gce/BUILD create mode 100644 vendor/k8s.io/kubernetes/cluster/gce/OWNERS create mode 100644 vendor/k8s.io/kubernetes/cluster/gce/config-common.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/config-default.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD create mode 100644 vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/util.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/gce/windows/node-helper.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/images/kubemark/BUILD create mode 100644 vendor/k8s.io/kubernetes/cluster/images/kubemark/Dockerfile create mode 100644 vendor/k8s.io/kubernetes/cluster/images/kubemark/Makefile create mode 100644 vendor/k8s.io/kubernetes/cluster/images/kubemark/OWNERS create mode 100755 vendor/k8s.io/kubernetes/cluster/kube-util.sh create mode 100755 vendor/k8s.io/kubernetes/cluster/kubectl.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/kubemark/OWNERS create mode 100644 vendor/k8s.io/kubernetes/cluster/kubemark/gce/config-default.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/kubemark/util.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/skeleton/util.sh create mode 100644 vendor/k8s.io/kubernetes/cluster/windows/BUILD create mode 100644 vendor/k8s.io/kubernetes/cluster/windows/OWNERS create mode 100755 vendor/k8s.io/kubernetes/cluster/windows/node-helper.sh create mode 100644 vendor/k8s.io/kubernetes/hack/lib/BUILD create mode 100755 vendor/k8s.io/kubernetes/hack/lib/etcd.sh create mode 100755 vendor/k8s.io/kubernetes/hack/lib/golang.sh create mode 100755 vendor/k8s.io/kubernetes/hack/lib/init.sh create mode 100644 vendor/k8s.io/kubernetes/hack/lib/logging.sh create mode 100755 vendor/k8s.io/kubernetes/hack/lib/util.sh create mode 100644 vendor/k8s.io/kubernetes/hack/lib/version.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/BUILD create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/OWNERS create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/cloud-provider-config.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/common/util.sh create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh create mode 100644 
vendor/k8s.io/kubernetes/test/kubemark/iks/shutdown.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/iks/startup.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/iks/util.sh create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/master-log-dump.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/pre-existing/README.md create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/pre-existing/util.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/addons/heapster.json create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/addons/kube_dns.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/cluster-autoscaler_template.json create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/heapster_template.json create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/kernel-monitor.json create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/kube_dns_template.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-master-env.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-ns.json create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/README.md create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/cluster-autoscaler-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/heapster-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kube-dns-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubecfg-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubelet-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/npd-binding.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd-events.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-apiserver.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-controller-manager.yaml create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-scheduler.yaml create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/resources/start-kubemark-master.sh create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh create mode 100644 vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh create mode 100755 vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh diff --git a/Gopkg.lock b/Gopkg.lock index c8321a9d2..702c8f3e8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1155,6 +1155,7 @@ "k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/apis/scheduling", "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", + "k8s.io/kubernetes/pkg/kubelet/apis", 
"k8s.io/kubernetes/pkg/scheduler/algorithm", "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", diff --git a/Gopkg.toml b/Gopkg.toml index 7a3d87e7d..019e1917e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -33,6 +33,8 @@ required = [ "k8s.io/code-generator/cmd/defaulter-gen", ] +noverify = ["k8s.io/kubernetes"] + [[constraint]] branch = "master" name = "github.com/golang/glog" diff --git a/test/kubemark/kube-batch.yaml b/test/kubemark/kube-batch.yaml new file mode 100755 index 000000000..0365c302d --- /dev/null +++ b/test/kubemark/kube-batch.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-batch + namespace: kube-system +spec: + hostNetwork: true + nodeName: kubernetes-master + containers: + - name: kube-batch + image: kubesigs/kube-batch:v0.4.2 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + command: + - /bin/sh + - -c + - /usr/local/bin/kube-batch + --v=4 --logtostderr --schedule-period=100ms --listen-address=:8081 --kubeconfig=/etc/srv/kubernetes/kubeconfig.kubemark + 1>>/var/log/kube-batch.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + volumeMounts: + - name: srvkube + mountPath: /etc/srv/kubernetes + readOnly: true + - name: logfile + mountPath: /var/log/kube-batch.log + volumes: + - name: srvkube + hostPath: + path: /etc/srv/kubernetes + - name: logfile + hostPath: + path: /var/log/kube-batch.log + type: FileOrCreate diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh new file mode 100755 index 000000000..211b5a70b --- /dev/null +++ b/test/kubemark/start-kubemark.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +TMP_ROOT="$(dirname "${BASH_SOURCE}")/../../vendor/k8s.io/kubernetes" +KUBE_ROOT=$(readlink -e "${TMP_ROOT}" 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' "${TMP_ROOT}") +KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" +RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" +CRD_DIRECTORY="${KUBE_ROOT}/../../../deployment/kube-batch/templates" +QUEUE_DIR="${KUBE_ROOT}/../../../config/queue" + +#Build kubernetes Binary and copy to _output folder +if [ ! 
-d "$KUBE_ROOT/_output" ]; then + mkdir -p /tmp/src/k8s.io + cd /tmp/src/k8s.io + git clone https://github.com/kubernetes/kubernetes.git + cd kubernetes + make quick-release + mv _output/ $KUBE_ROOT +fi + + +#Appending lines to start kube-batch +src="start-kubemaster-component \"kube-scheduler\"" +dest="start-kubemaster-component \"kube-scheduler\" \ncp \${KUBE_ROOT}/kubeconfig.kubemark /etc/srv/kubernetes \nstart-kubemaster-component \"kube-batch\"" +sed -i "s@${src}@${dest}@g" "${KUBEMARK_DIRECTORY}/resources/start-kubemark-master.sh" + +#Appending lines to copy kube-batch.yaml +src1="\"\${SERVER_BINARY_TAR}\" \\\\" +dest1="\"\${SERVER_BINARY_TAR}\" \\\\\n \"\${RESOURCE_DIRECTORY}/kube-batch.yaml\" \\\\" +sed -i "s@${src1}@${dest1}@g" "${KUBEMARK_DIRECTORY}/start-kubemark.sh" + + +cp kube-batch.yaml ${RESOURCE_DIRECTORY} + +bash -x ${KUBEMARK_DIRECTORY}/start-kubemark.sh + +#creating the CRD Queue and PodGroup +podgroup=$("${KUBECTL}" --kubeconfig="${RESOURCE_DIRECTORY}"/kubeconfig.kubemark create -f "${CRD_DIRECTORY}"/scheduling_v1alpha1_queue.yaml 2> /dev/null) || true +queue=$("${KUBECTL}" --kubeconfig="${RESOURCE_DIRECTORY}"/kubeconfig.kubemark create -f "${CRD_DIRECTORY}"/scheduling_v1alpha1_podgroup.yaml 2> /dev/null) || true + +#creating default queue +defaultqueue=$("${KUBECTL}" --kubeconfig="${RESOURCE_DIRECTORY}"/kubeconfig.kubemark create -f "${QUEUE_DIR}"/default.yaml 2> /dev/null) || true + +#copy the kubemark config +cp ${RESOURCE_DIRECTORY}/kubeconfig.kubemark ./ + +#Reverting the script changes in the vendor and tmp +data="kube-batch.yaml" +sed -i "/${data}/d" "${KUBEMARK_DIRECTORY}/start-kubemark.sh" +data1="kube-batch" +data2="kubeconfig.kubemark" +sed -i "/${data1}/d" "${KUBEMARK_DIRECTORY}/resources/start-kubemark-master.sh" +sed -i "/${data2}/d" "${KUBEMARK_DIRECTORY}/resources/start-kubemark-master.sh" +rm -rf ${RESOURCE_DIRECTORY}/kube-batch.yaml +rm -rf /tmp/src/ diff --git a/test/kubemark/stop-kubemark.sh b/test/kubemark/stop-kubemark.sh new file mode 100755 index 000000000..58271516f --- /dev/null +++ b/test/kubemark/stop-kubemark.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +TMP_ROOT="$(dirname "${BASH_SOURCE}")/../../vendor/k8s.io/kubernetes" +KUBE_ROOT=$(readlink -e "${TMP_ROOT}" 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' "${TMP_ROOT}") +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" + +bash -x ${KUBEMARK_DIRECTORY}/stop-kubemark.sh diff --git a/vendor/k8s.io/kubernetes/cluster/BUILD b/vendor/k8s.io/kubernetes/cluster/BUILD new file mode 100644 index 000000000..9b2ba5395 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/BUILD @@ -0,0 +1,62 @@ +package(default_visibility = ["//visibility:public"]) + +load("@io_k8s_repo_infra//defs:pkg.bzl", "pkg_tar") + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cluster/addons:all-srcs", + "//cluster/gce:all-srcs", + "//cluster/images/conformance:all-srcs", + "//cluster/images/etcd-version-monitor:all-srcs", + "//cluster/images/etcd/migrate:all-srcs", + "//cluster/images/hyperkube:all-srcs", + "//cluster/images/kubemark:all-srcs", + ], + tags = ["automanaged"], +) + +pkg_tar( + name = "manifests", + mode = "0644", + package_dir = "kubernetes/gci-trusty", + deps = [ + "//cluster/addons", + "//cluster/gce/addons", + "//cluster/gce/gci:gci-trusty-manifests", + "//cluster/gce/manifests:gce-master-manifests", + ], +) + +# These tests just verify that bash can 
interpret the file. +sh_test( + name = "common_test", + srcs = ["common.sh"], + deps = [ + "//hack/lib", + ], +) + +sh_test( + name = "clientbin_test", + srcs = ["clientbin.sh"], + deps = [ + "//hack/lib", + ], +) + +sh_test( + name = "kube-util_test", + srcs = ["kube-util.sh"], + deps = [ + "//hack/lib", + ], +) diff --git a/vendor/k8s.io/kubernetes/cluster/OWNERS b/vendor/k8s.io/kubernetes/cluster/OWNERS new file mode 100644 index 000000000..bc29920a0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - bentheelder + - eparis + - jbeda + - Katharine + - mikedanese + - roberthbailey + - zmerlynn +approvers: + - eparis + - jbeda + - mikedanese + - roberthbailey + - spiffxp + - zmerlynn +labels: +- sig/cluster-lifecycle diff --git a/vendor/k8s.io/kubernetes/cluster/clientbin.sh b/vendor/k8s.io/kubernetes/cluster/clientbin.sh new file mode 100755 index 000000000..078729eb9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/clientbin.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE}")/..} + +# Detect the OS name/arch so that we can find our binary +case "$(uname -s)" in + Darwin) + host_os=darwin + ;; + Linux) + host_os=linux + ;; + *) + echo "Unsupported host OS. Must be Linux or Mac OS X." >&2 + exit 1 + ;; +esac + +case "$(uname -m)" in + x86_64*) + host_arch=amd64 + ;; + i?86_64*) + host_arch=amd64 + ;; + amd64*) + host_arch=amd64 + ;; + arm*) + host_arch=arm + ;; + aarch64*) + host_arch=arm64 + ;; + i?86*) + host_arch=386 + ;; + s390x*) + host_arch=s390x + ;; + ppc64le*) + host_arch=ppc64le + ;; + *) + echo "Unsupported host arch. Must be x86_64, 386, arm, s390x or ppc64le." >&2 + exit 1 + ;; +esac + +# Get the absolute path of the directory component of a file, i.e. the +# absolute path of the dirname of $1. +get_absolute_dirname() { + echo "$(cd "$(dirname "$1")" && pwd)" +} + +function get_bin() { + bin="${1:-}" + srcdir="${2:-}" + if [[ "${bin}" == "" ]]; then + echo "Binary name is required" + exit 1 + fi + if [[ "${srcdir}" == "" ]]; then + echo "Source directory path is required" + exit 1 + fi + + locations=( + "${KUBE_ROOT}/_output/bin/${bin}" + "${KUBE_ROOT}/_output/dockerized/bin/${host_os}/${host_arch}/${bin}" + "${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}/${bin}" + "${KUBE_ROOT}/platforms/${host_os}/${host_arch}/${bin}" + ) + # Also search for binary in bazel build tree. + # The bazel go rules place binaries in subtrees like + # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure + # the platform name is matched in the path. 
+ locations+=($(find "${KUBE_ROOT}/bazel-bin/${srcdir}" -type f -executable \ + -path "*/${host_os}_${host_arch}*/${bin}" 2>/dev/null || true) ) + echo $( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) +} + +function print_error() { + { + echo "It looks as if you don't have a compiled ${1:-} binary" + echo + echo "If you are running from a clone of the git repo, please run" + echo "'./build/run.sh make cross'. Note that this requires having" + echo "Docker installed." + echo + echo "If you are running from a binary release tarball, something is wrong. " + echo "Look at http://kubernetes.io/ for information on how to contact the " + echo "development team for help." + } >&2 +} diff --git a/vendor/k8s.io/kubernetes/cluster/common.sh b/vendor/k8s.io/kubernetes/cluster/common.sh new file mode 100755 index 000000000..a2613b558 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/common.sh @@ -0,0 +1,507 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Common utilities for kube-up/kube-down + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd) + +DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config" + +source "${KUBE_ROOT}/hack/lib/util.sh" +# KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4" +# +# NOTE This must match the version_regex in build/common.sh +# kube::release::parse_and_validate_release_version() +KUBE_RELEASE_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*))?$" +KUBE_RELEASE_VERSION_DASHED_REGEX="v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)-(0|[1-9][0-9]*))?" + +# KUBE_CI_VERSION_REGEX matches things like "v1.2.3-alpha.4.56+abcdefg" This +# +# NOTE This must match the version_regex in build/common.sh +# kube::release::parse_and_validate_ci_version() +KUBE_CI_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*)\\+[-0-9a-z]*)?$" +KUBE_CI_VERSION_DASHED_REGEX="^v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-([a-zA-Z0-9]+)-(0|[1-9][0-9]*)(-(0|[1-9][0-9]*)\\+[-0-9a-z]*)?" + +# Generate kubeconfig data for the created cluster. 
+# Assumed vars: +# KUBE_USER +# KUBE_PASSWORD +# KUBE_MASTER_IP +# KUBECONFIG +# CONTEXT +# +# If the apiserver supports bearer auth, also provide: +# KUBE_BEARER_TOKEN +# +# If the kubeconfig context being created should NOT be set as the current context +# SECONDARY_KUBECONFIG=true +# +# To explicitly name the context being created, use OVERRIDE_CONTEXT +# +# The following can be omitted for --insecure-skip-tls-verify +# KUBE_CERT +# KUBE_KEY +# CA_CERT +function create-kubeconfig() { + KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} + local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" + SECONDARY_KUBECONFIG=${SECONDARY_KUBECONFIG:-} + OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-} + + if [[ "$OVERRIDE_CONTEXT" != "" ]];then + CONTEXT=$OVERRIDE_CONTEXT + fi + + # KUBECONFIG determines the file we write to, but it may not exist yet + OLD_IFS=$IFS + IFS=':' + for cfg in ${KUBECONFIG} ; do + if [[ ! -e "${cfg}" ]]; then + mkdir -p "$(dirname "${cfg}")" + touch "${cfg}" + fi + done + IFS=$OLD_IFS + + local cluster_args=( + "--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}" + ) + if [[ -z "${CA_CERT:-}" ]]; then + cluster_args+=("--insecure-skip-tls-verify=true") + else + cluster_args+=( + "--certificate-authority=${CA_CERT}" + "--embed-certs=true" + ) + fi + + local user_args=() + if [[ ! -z "${KUBE_BEARER_TOKEN:-}" ]]; then + user_args+=( + "--token=${KUBE_BEARER_TOKEN}" + ) + elif [[ ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then + user_args+=( + "--username=${KUBE_USER}" + "--password=${KUBE_PASSWORD}" + ) + fi + if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then + user_args+=( + "--client-certificate=${KUBE_CERT}" + "--client-key=${KUBE_KEY}" + "--embed-certs=true" + ) + fi + + KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}" + if [[ -n "${user_args[@]:-}" ]]; then + KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}" + fi + KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}" + + if [[ "${SECONDARY_KUBECONFIG}" != "true" ]];then + KUBECONFIG="${KUBECONFIG}" "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}" + fi + + # If we have a bearer token, also create a credential entry with basic auth + # so that it is easy to discover the basic auth password for your cluster + # to use in a web browser. + if [[ ! -z "${KUBE_BEARER_TOKEN:-}" && ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then + KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}-basic-auth" "--username=${KUBE_USER}" "--password=${KUBE_PASSWORD}" + fi + + echo "Wrote config for ${CONTEXT} to ${KUBECONFIG}" +} + +# Clear kubeconfig data for a context +# Assumed vars: +# KUBECONFIG +# CONTEXT +# +# To explicitly name the context being removed, use OVERRIDE_CONTEXT +function clear-kubeconfig() { + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} + OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-} + + if [[ "$OVERRIDE_CONTEXT" != "" ]];then + CONTEXT=$OVERRIDE_CONTEXT + fi + + local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" + # Unset the current-context before we delete it, as otherwise kubectl errors. 
+ local cc=$("${kubectl}" config view -o jsonpath='{.current-context}') + if [[ "${cc}" == "${CONTEXT}" ]]; then + "${kubectl}" config unset current-context + fi + "${kubectl}" config unset "clusters.${CONTEXT}" + "${kubectl}" config unset "users.${CONTEXT}" + "${kubectl}" config unset "users.${CONTEXT}-basic-auth" + "${kubectl}" config unset "contexts.${CONTEXT}" + + echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}" +} + +# Gets username, password for the current-context in kubeconfig, if they exist. +# Assumed vars: +# KUBECONFIG # if unset, defaults to global +# KUBE_CONTEXT # if unset, defaults to current-context +# +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +# +# KUBE_USER,KUBE_PASSWORD will be empty if no current-context is set, or +# the current-context user does not exist or contain basicauth entries. +function get-kubeconfig-basicauth() { + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} + + local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}") + if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then + cc="${KUBE_CONTEXT}" + fi + local user=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.user}") + get-kubeconfig-user-basicauth "${user}" + + if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then + # kube-up stores username/password in a an additional kubeconfig section + # suffixed with "-basic-auth". Cloudproviders like GKE store in directly + # in the top level section along with the other credential information. + # TODO: Handle this uniformly, either get rid of "basic-auth" or + # consolidate its usage into a function across scripts in cluster/ + get-kubeconfig-user-basicauth "${user}-basic-auth" + fi +} + +# Sets KUBE_USER and KUBE_PASSWORD to the username and password specified in +# the kubeconfig section corresponding to $1. +# +# Args: +# $1 kubeconfig section to look for basic auth (eg: user or user-basic-auth). +# Assumed vars: +# KUBE_ROOT +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +function get-kubeconfig-user-basicauth() { + KUBE_USER=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.username}") + KUBE_PASSWORD=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.password}") +} + +# Generate basic auth user and password. + +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +function gen-kube-basicauth() { + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))') +} + +# Get the bearer token for the current-context in kubeconfig if one exists. +# Assumed vars: +# KUBECONFIG # if unset, defaults to global +# KUBE_CONTEXT # if unset, defaults to current-context +# +# Vars set: +# KUBE_BEARER_TOKEN +# +# KUBE_BEARER_TOKEN will be empty if no current-context is set, or the +# current-context user does not exist or contain a bearer token entry. +function get-kubeconfig-bearertoken() { + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} + + local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}") + if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then + cc="${KUBE_CONTEXT}" + fi + local user=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.user}") + KUBE_BEARER_TOKEN=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"${user}\")].user.token}") +} + +# Generate bearer token. 
+# +# Vars set: +# KUBE_BEARER_TOKEN +function gen-kube-bearertoken() { + KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) +} + +function load-or-gen-kube-basicauth() { + if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then + get-kubeconfig-basicauth + fi + + if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then + gen-kube-basicauth + fi + + # Make sure they don't contain any funny characters. + if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then + echo "Bad KUBE_USER string." + exit 1 + fi + if ! [[ "${KUBE_PASSWORD}" =~ ^[-._@#%/a-zA-Z0-9]+$ ]]; then + echo "Bad KUBE_PASSWORD string." + exit 1 + fi +} + +# Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6", +# "v1.2.0-alpha.1.881+376438b69c7612") or a version' publication of the form +# / (e.g. "release/stable",' "ci/latest-1"). +# +# See the docs on getting builds for more information about version +# publication. +# +# Args: +# $1 version string from command line +# Vars set: +# KUBE_VERSION +function set_binary_version() { + if [[ "${1}" =~ "/" ]]; then + IFS='/' read -a path <<< "${1}" + if [[ "${path[0]}" == "release" ]]; then + KUBE_VERSION=$(gsutil cat "gs://kubernetes-release/${1}.txt") + else + KUBE_VERSION=$(gsutil cat "gs://kubernetes-release-dev/${1}.txt") + fi + else + KUBE_VERSION=${1} + fi +} + +# Search for the specified tarball in the various known output locations, +# echoing the location if found. +# +# Assumed vars: +# KUBE_ROOT +# +# Args: +# $1 name of tarball to search for +function find-tar() { + local -r tarball=$1 + locations=( + "${KUBE_ROOT}/node/${tarball}" + "${KUBE_ROOT}/server/${tarball}" + "${KUBE_ROOT}/_output/release-tars/${tarball}" + "${KUBE_ROOT}/bazel-bin/build/release-tars/${tarball}" + ) + location=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) + + if [[ ! -f "${location}" ]]; then + echo "!!! Cannot find ${tarball}" >&2 + exit 1 + fi + echo "${location}" +} + +# Verify and find the various tar files that we are going to use on the server. +# +# Assumed vars: +# KUBE_ROOT +# Vars set: +# NODE_BINARY_TAR +# SERVER_BINARY_TAR +# KUBE_MANIFESTS_TAR +function find-release-tars() { + SERVER_BINARY_TAR=$(find-tar kubernetes-server-linux-amd64.tar.gz) + if [[ "${NUM_WINDOWS_NODES}" -gt "0" ]]; then + NODE_BINARY_TAR=$(find-tar kubernetes-node-windows-amd64.tar.gz) + fi + + # This tarball is used by GCI, Ubuntu Trusty, and Container Linux. + KUBE_MANIFESTS_TAR= + if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \ + [[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" || "${NODE_OS_DISTRIBUTION:-}" == "custom" ]] ; then + KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz) + fi +} + +# Run the cfssl command to generates certificate files for etcd service, the +# certificate files will save in $1 directory. +# +# Optional vars: +# GEN_ETCD_CA_CERT (CA cert encode with base64 and ZIP compression) +# GEN_ETCD_CA_KEY (CA key encode with base64) +# +# If GEN_ETCD_CA_CERT or GEN_ETCD_CA_KEY is not specified, it will generates certs for CA. 
+# +# Args: +# $1 (the directory that certificate files to save) +# $2 (the ip of etcd member) +# $3 (the type of etcd certificates, must be one of client, server, peer) +# $4 (the prefix of the certificate filename, default is $3) +function generate-etcd-cert() { + local cert_dir=${1} + local member_ip=${2} + local type_cert=${3} + local prefix=${4:-"${type_cert}"} + + local GEN_ETCD_CA_CERT=${GEN_ETCD_CA_CERT:-} + local GEN_ETCD_CA_KEY=${GEN_ETCD_CA_KEY:-} + + mkdir -p "${cert_dir}" + pushd "${cert_dir}" + + kube::util::ensure-cfssl . + + if [ ! -r "ca-config.json" ]; then + cat >ca-config.json <ca-csr.json < ca.pem + echo "${ca_key}" | base64 --decode > ca-key.pem + fi + + if [[ ! -r "ca.pem" || ! -r "ca-key.pem" ]]; then + ${CFSSL_BIN} gencert -initca ca-csr.json | ${CFSSLJSON_BIN} -bare ca - + fi + + case "${type_cert}" in + client) + echo "Generate client certificates..." + echo '{"CN":"client","hosts":["*"],"key":{"algo":"ecdsa","size":256}}' \ + | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client - \ + | ${CFSSLJSON_BIN} -bare "${prefix}" + ;; + server) + echo "Generate server certificates..." + echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \ + | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server -hostname="${member_ip},127.0.0.1" - \ + | ${CFSSLJSON_BIN} -bare "${prefix}" + ;; + peer) + echo "Generate peer certificates..." + echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \ + | ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer -hostname="${member_ip},127.0.0.1" - \ + | ${CFSSLJSON_BIN} -bare "${prefix}" + ;; + *) + echo "Unknow, unsupported etcd certs type: ${type_cert}" >&2 + echo "Supported type: client, server, peer" >&2 + exit 2 + esac + + popd +} + +# Check whether required binaries exist, prompting to download +# if missing. +# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries +# without prompting. +function verify-kube-binaries() { + if ! "${KUBE_ROOT}/cluster/kubectl.sh" version --client >&/dev/null; then + echo "!!! kubectl appears to be broken or missing" + download-release-binaries + fi +} + +# Check whether required release artifacts exist, prompting to download +# if missing. +# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries +# without prompting. +function verify-release-tars() { + if ! $(find-release-tars); then + download-release-binaries + fi +} + +# Download release artifacts. +function download-release-binaries() { + get_binaries_script="${KUBE_ROOT}/cluster/get-kube-binaries.sh" + local resp="y" + if [[ ! "${KUBERNETES_SKIP_CONFIRM:-n}" =~ ^[yY]$ ]]; then + echo "Required release artifacts appear to be missing. Do you wish to download them? [Y/n]" + read resp + fi + if [[ "${resp}" =~ ^[nN]$ ]]; then + echo "You must download release artifacts to continue. You can use " + echo " ${get_binaries_script}" + echo "to do this for your automatically." 
+ exit 1 + fi + "${get_binaries_script}" +} + +# Run pushd without stack output +function pushd() { + command pushd $@ > /dev/null +} + +# Run popd without stack output +function popd() { + command popd $@ > /dev/null +} diff --git a/vendor/k8s.io/kubernetes/cluster/gce/BUILD b/vendor/k8s.io/kubernetes/cluster/gce/BUILD new file mode 100644 index 000000000..1996bdbd1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/BUILD @@ -0,0 +1,23 @@ +package(default_visibility = ["//visibility:public"]) + +load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup") +load("@io_k8s_repo_infra//defs:pkg.bzl", "pkg_tar") + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cluster/gce/addons:all-srcs", + "//cluster/gce/gci:all-srcs", + "//cluster/gce/manifests:all-srcs", + "//cluster/gce/windows:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/cluster/gce/OWNERS b/vendor/k8s.io/kubernetes/cluster/gce/OWNERS new file mode 100644 index 000000000..eef831683 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/OWNERS @@ -0,0 +1,20 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - bowei + - gmarek + - jszczepkowski + - vishh + - mwielgus + - MaciekPytel + - jingax10 + - yujuhong +approvers: + - bowei + - gmarek + - jszczepkowski + - vishh + - mwielgus + - MaciekPytel + - jingax10 + - yujuhong diff --git a/vendor/k8s.io/kubernetes/cluster/gce/config-common.sh b/vendor/k8s.io/kubernetes/cluster/gce/config-common.sh new file mode 100644 index 000000000..910983d19 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/config-common.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Returns the total number of Linux and Windows nodes in the cluster. 
+# +# Vars assumed: +# NUM_NODES +# NUM_WINDOWS_NODES +function get-num-nodes { + echo "$((${NUM_NODES} + ${NUM_WINDOWS_NODES}))" +} + +# Vars assumed: +# NUM_NODES +# NUM_WINDOWS_NODES +function get-master-size { + local suggested_master_size=1 + if [[ "$(get-num-nodes)" -gt "5" ]]; then + suggested_master_size=2 + fi + if [[ "$(get-num-nodes)" -gt "10" ]]; then + suggested_master_size=4 + fi + if [[ "$(get-num-nodes)" -gt "100" ]]; then + suggested_master_size=8 + fi + if [[ "$(get-num-nodes)" -gt "250" ]]; then + suggested_master_size=16 + fi + if [[ "$(get-num-nodes)" -gt "500" ]]; then + suggested_master_size=32 + fi + if [[ "$(get-num-nodes)" -gt "3000" ]]; then + suggested_master_size=64 + fi + echo "${suggested_master_size}" +} + +# Vars assumed: +# NUM_NODES +# NUM_WINDOWS_NODES +function get-master-root-disk-size() { + local suggested_master_root_disk_size="20GB" + if [[ "$(get-num-nodes)" -gt "500" ]]; then + suggested_master_root_disk_size="100GB" + fi + if [[ "$(get-num-nodes)" -gt "3000" ]]; then + suggested_master_root_disk_size="500GB" + fi + echo "${suggested_master_root_disk_size}" +} + +# Vars assumed: +# NUM_NODES +# NUM_WINDOWS_NODES +function get-master-disk-size() { + local suggested_master_disk_size="20GB" + if [[ "$(get-num-nodes)" -gt "500" ]]; then + suggested_master_disk_size="100GB" + fi + if [[ "$(get-num-nodes)" -gt "3000" ]]; then + suggested_master_disk_size="200GB" + fi + echo "${suggested_master_disk_size}" +} + +function get-node-ip-range { + if [[ -n "${NODE_IP_RANGE:-}" ]]; then + >&2 echo "Using user provided NODE_IP_RANGE: ${NODE_IP_RANGE}" + echo "${NODE_IP_RANGE}" + return + fi + local suggested_range="10.40.0.0/22" + if [[ "$(get-num-nodes)" -gt 1000 ]]; then + suggested_range="10.40.0.0/21" + fi + if [[ "$(get-num-nodes)" -gt 2000 ]]; then + suggested_range="10.40.0.0/20" + fi + if [[ "$(get-num-nodes)" -gt 4000 ]]; then + suggested_range="10.40.0.0/19" + fi + echo "${suggested_range}" +} + +function get-cluster-ip-range { + local suggested_range="10.64.0.0/14" + if [[ "$(get-num-nodes)" -gt 1000 ]]; then + suggested_range="10.64.0.0/13" + fi + if [[ "$(get-num-nodes)" -gt 2000 ]]; then + suggested_range="10.64.0.0/12" + fi + if [[ "$(get-num-nodes)" -gt 4000 ]]; then + suggested_range="10.64.0.0/11" + fi + echo "${suggested_range}" +} + +# Calculate ip alias range based on max number of pods. +# Let pow be the smallest integer which is bigger or equal to log2($1 * 2). +# (32 - pow) will be returned. +# +# $1: The number of max pods limitation. +function get-alias-range-size() { + for pow in {0..31}; do + if (( 1 << $pow >= $1 * 2 )); then + echo $((32 - pow)) + return 0 + fi + done +} +# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account +# in order to initialize properly. +NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}" + +# Root directory for Kubernetes files on Windows nodes. +WINDOWS_K8S_DIR="C:\etc\kubernetes" +# Directory where Kubernetes binaries will be installed on Windows nodes. +WINDOWS_NODE_DIR="${WINDOWS_K8S_DIR}\node\bin" +# Directory where Kubernetes log files will be stored on Windows nodes. +WINDOWS_LOGS_DIR="${WINDOWS_K8S_DIR}\logs" +# Directory where CNI binaries will be stored on Windows nodes. +WINDOWS_CNI_DIR="${WINDOWS_K8S_DIR}\cni" +# Directory where CNI config files will be stored on Windows nodes. +WINDOWS_CNI_CONFIG_DIR="${WINDOWS_K8S_DIR}\cni\config" +# Pod manifests directory for Windows nodes on Windows nodes. 
+WINDOWS_MANIFESTS_DIR="${WINDOWS_K8S_DIR}\manifests" +# Directory where cert/key files will be stores on Windows nodes. +WINDOWS_PKI_DIR="${WINDOWS_K8S_DIR}\pki" +# Path for kubelet config file on Windows nodes. +WINDOWS_KUBELET_CONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet-config.yaml" +# Path for kubeconfig file on Windows nodes. +WINDOWS_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.kubeconfig" +# Path for bootstrap kubeconfig file on Windows nodes. +WINDOWS_BOOTSTRAP_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.bootstrap-kubeconfig" +# Path for kube-proxy kubeconfig file on Windows nodes. +WINDOWS_KUBEPROXY_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubeproxy.kubeconfig" diff --git a/vendor/k8s.io/kubernetes/cluster/gce/config-default.sh b/vendor/k8s.io/kubernetes/cluster/gce/config-default.sh new file mode 100755 index 000000000..a8ef5c0a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/config-default.sh @@ -0,0 +1,488 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(jbeda): Provide a way to override project +# gcloud multiplexing for shared GCE/GKE tests. +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/cluster/gce/config-common.sh" + +# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/). +# This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/ +GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-} +GCLOUD=gcloud +ZONE=${KUBE_GCE_ZONE:-us-central1-b} +REGION=${ZONE%-*} +RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false} +REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true} +NODE_SIZE=${NODE_SIZE:-n1-standard-2} +NUM_NODES=${NUM_NODES:-3} +NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0} +MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)} +MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures. +MASTER_DISK_TYPE=pd-ssd +MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)} +MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)} +NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard} +NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB} +NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0} +NODE_LABELS="${KUBE_NODE_LABELS:-}" +WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS:-}" + +# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices +# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple +# configurations by separating them by a semi-colon ex. "2,scsi,fs;1,nvme,block" +# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD. +NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-} +# Accelerators to be attached to each node. 
Format "type=,count=" +# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/ +NODE_ACCELERATORS=${NODE_ACCELERATORS:-""} +REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} +PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false} +KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true} +KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below +CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false} +MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800} + +MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}} +NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}} +WINDOWS_NODE_OS_DISTRIBUTION=${WINDOWS_NODE_OS_DISTRIBUTION:-win1809} + +if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then + MASTER_OS_DISTRIBUTION="gci" +fi + +if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then + NODE_OS_DISTRIBUTION="gci" +fi + +# GPUs supported in GCE do not have compatible drivers in Debian 7. +if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then + NODE_ACCELERATORS="" +fi + +# By default a cluster will be started with the master and nodes +# on Container-optimized OS (cos, previously known as gci). If +# you are updating the os image versions, update this variable. +# Also please update corresponding image for node e2e at: +# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml +GCI_VERSION=${KUBE_GCI_VERSION:-cos-beta-73-11647-64-0} +MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-} +MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} +NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}} +NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud} +NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default} +CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker} +CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-} +CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-} +LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-} +# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas. +MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}} +# MASTER_EXTRA_METADATA is the extra instance metadata on node instance separated by commas. +NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}} +# KUBELET_TEST_ARGS are extra arguments passed to kubelet. +KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-} + +NETWORK=${KUBE_GCE_NETWORK:-default} +# Enable network deletion by default (for kube-down), unless we're using 'default' network. +if [[ "${NETWORK}" == "default" ]]; then + KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false} +else + KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true} +fi +if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then + SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}" +fi +INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}" +CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}" +MASTER_NAME="${INSTANCE_PREFIX}-master" +AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator" +INITIAL_ETCD_CLUSTER="${MASTER_NAME}" +MASTER_TAG="${INSTANCE_PREFIX}-master" +NODE_TAG="${INSTANCE_PREFIX}-minion" + +CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}" +MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" +# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true. +# It is the primary range in the subnet and is the range used for node instance IPs. 
+NODE_IP_RANGE="$(get-node-ip-range)" + +# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account +# in order to initialize properly. +NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}" + +# Extra docker options for nodes. +EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}" + +VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}" + +SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET +ALLOCATE_NODE_CIDRS=true + +# When set to true, Docker Cache is enabled by default as part of the cluster bring up. +ENABLE_DOCKER_REGISTRY_CACHE=true + +# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests: +# glbc - CE L7 Load Balancer Controller +ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}" + +# Optional: Cluster monitoring to setup as part of the cluster bring up: +# none - No cluster monitoring setup +# influxdb - Heapster, InfluxDB, and Grafana +# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging +# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging +# googleinfluxdb - Enable influxdb and google (except GCM) +# standalone - Heapster only. Metrics available via Heapster REST API. +ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}" + +# Optional: Enable deploying separate prometheus stack for monitoring kubernetes cluster +ENABLE_PROMETHEUS_MONITORING="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}" + +# Optional: Enable Metrics Server. Metrics Server should be enable everywhere, +# since it's a critical component, but in the first release we need a way to disable +# this in case of stability issues. +# TODO(piosz) remove this option once Metrics Server became a stable thing. +ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}" + +# Optional: Metadata agent to setup as part of the cluster bring up: +# none - No metadata agent +# stackdriver - Stackdriver metadata agent +# Metadata agent is a daemon set that provides metadata of kubernetes objects +# running on the same node for exporting metrics and logs. +ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}" + +# One special node out of NUM_NODES would be created of this type if specified. +# Useful for scheduling heapster in large clusters with nodes of small size. +HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" + +# NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes. +NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}" +WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS:-}" + +if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then + NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true" + WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS},cloud.google.com/gke-preemptible=true" +elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true" + WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true" +fi + +# To avoid running Calico on a node that is not configured appropriately, +# label each Node so that the DaemonSet can run the Pods only on ready Nodes. +# Windows nodes do not support Calico. +if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true" +fi + +# Optional: Enable netd. 
+ENABLE_NETD="${KUBE_ENABLE_NETD:-false}" +CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}" +CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}" +CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}" + +# To avoid running netd on a node that is not configured appropriately, +# label each Node so that the DaemonSet can run the Pods only on ready Nodes. +# Windows nodes do not support netd. +if [[ ${ENABLE_NETD:-} == "true" ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true" +fi + +ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}" +LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}" + +# Enable metadata concealment by firewalling pod traffic to the metadata server +# and run a proxy daemonset on nodes. +# +# TODO(#8867) Enable by default. +ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false +METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false +if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then + # Put the necessary label on the node so the daemonset gets scheduled. + NODE_LABELS="${NODE_LABELS},cloud.google.com/metadata-proxy-ready=true" + # TODO(liggitt): remove this in v1.16 + NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true" + # Add to the provider custom variables. + PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL" +fi + + +# Enable AESGCM encryption of secrets by default. +ENCRYPTION_PROVIDER_CONFIG="${ENCRYPTION_PROVIDER_CONFIG:-}" +if [[ -z "${ENCRYPTION_PROVIDER_CONFIG}" ]]; then + ENCRYPTION_PROVIDER_CONFIG=$(cat << EOM | base64 | tr -d '\r\n' +kind: EncryptionConfiguration +apiVersion: apiserver.config.k8s.io/v1 +resources: + - resources: + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: $(dd if=/dev/urandom iflag=fullblock bs=32 count=1 2>/dev/null | base64 | tr -d '\r\n') +EOM +) +fi + +# Optional: Enable node logging. +ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}" +LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp + +# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. +ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}" +ELASTICSEARCH_LOGGING_REPLICAS=1 + +# Optional: Don't require https for registries in our local RFC1918 network +if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then + EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8" +fi + +# Optional: customize runtime config +RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" + +if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then + RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}" +fi + +# Optional: set feature gates +FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}" + +if [[ ! -z "${NODE_ACCELERATORS}" ]]; then + FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true" + if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}" + fi +fi + +# Optional: Install cluster DNS. +# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS. 
+CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}" +ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" +DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}" +DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}" + +# Optional: Enable DNS horizontal autoscaler +ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}" + +# Optional: Install Kubernetes UI +ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" + +# Optional: Install node problem detector. +# none - Not run node problem detector. +# daemonset - Run node problem detector as daemonset. +# standalone - Run node problem detector as standalone system daemon. +if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then + # Enable standalone mode by default for gci. + ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}" +else + ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}" +fi +NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}" +NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" +NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" +NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" + +# Optional: Create autoscaler for cluster's nodes. +ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" +if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then + AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}" + AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}" + AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}" + AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}" +fi + +# Optional: Enable allocation of pod IPs using IP aliases. +# +# BETA FEATURE. +# +# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node. +# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a +# new subnetwork will be created for the cluster. +ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false} +NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator} +if [ ${ENABLE_IP_ALIASES} = true ]; then + # Number of Pods that can run on this node. + MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110} + # Size of ranges allocated to each node. + IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})" + IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default} + # If we're using custom network, use the subnet we already create for it as the one for ip-alias. + # Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network. + if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then + IP_ALIAS_SUBNETWORK="${SUBNETWORK}" + fi + # Reserve the services IP space to avoid being allocated for other GCP resources. + SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services} + NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator} + SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-} + # Add to the provider custom variables. + PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES" + PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE" + PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME" +elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then + # Should not have MAX_PODS_PER_NODE set for route-based clusters. + echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2 + exit 1 +fi + +# Enable GCE Alpha features. 
+if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES" +fi + +# Disable Docker live-restore. +if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE" +fi + +# Override default GLBC image +if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE" +fi +CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}" + +# Admission Controllers to invoke prior to persisting objects in cluster +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection + +if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then + ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy" +fi + +# MutatingAdmissionWebhook should be the last controller that modifies the +# request object, otherwise users will be confused if the mutating webhooks' +# modification is overwritten. +ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook" + +# ResourceQuota must come last, or a creation is recorded, but the pod was forbidden. +ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota" + +# Optional: if set to true kube-up will automatically check for existing resources and clean them up. +KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false} + +# Storage backend. 'etcd2' supported, 'etcd3' experimental. +STORAGE_BACKEND=${STORAGE_BACKEND:-} + +# Networking plugin specific settings. +NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet + +# Network Policy plugin specific settings. +NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico + +NON_MASQUERADE_CIDR="0.0.0.0/0" + +# How should the kubelet configure hairpin mode? +HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none +# Optional: if set to true, kube-up will configure the cluster to run e2e tests. +E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}" + +# Evict pods whenever compute resource availability on the nodes gets below a threshold. +EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}" + +# Optional: custom scheduling algorithm +SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}" + +# Optional: install a default StorageClass +ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}" + +# Optional: Enable legacy ABAC policy that makes all service accounts superusers. +ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false + +# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic +# authentication) in metadata should be treated as canonical, and therefore disk +# copies ought to be recreated/clobbered. +METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}" + +ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}" + +if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT" +fi +if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE" +fi + +if [[ -n "${POD_LOG_MAX_FILE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_FILE" +fi + +if [[ -n "${POD_LOG_MAX_SIZE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_SIZE" +fi + +# Fluentd requirements +# YAML exists to trigger a configuration refresh when changes are made. 
+FLUENTD_GCP_YAML_VERSION="v3.2.0" +FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" +FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}" +FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}" +FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}" + +# Heapster requirements +HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}" +HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}" +HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}" +HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}" + +# Optional: custom system banner for dashboard addon +CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}" + +# Default Stackdriver resources version exported by Fluentd-gcp addon +LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" + +# Adding to PROVIDER_VARS, since this is GCP-specific. +PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES" + +# Fluentd configuration for node-journal +ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}" + +# prometheus-to-sd configuration +PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}" +PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}" +ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}" + +# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here. +# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise. +KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false + +# Optional: duration of cluster signed certificates. +CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}" + +# Optional: enable pod priority +ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}" +if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then + FEATURE_GATES="${FEATURE_GATES},PodPriority=true" +fi + +# Optional: enable certificate rotation of the kubelet certificates. +ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}" + +# The number of services that are allowed to sync concurrently. Will be passed +# into kube-controller-manager via `--concurrent-service-syncs` +CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}" + +SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}" + +# Optional: Enable Node termination Handler for Preemptible and GPU VMs. +# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler +ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}" +# Override default Node Termination Handler Image +if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then + PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE" +fi + +# Taint Windows nodes by default to prevent Linux workloads from being +# scheduled onto them. 
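+# For reference, a Windows workload must tolerate this taint to be scheduled
+# onto such nodes; an illustrative pod-spec toleration matching the default
+# value below would be:
+#   tolerations:
+#   - key: "node.kubernetes.io/os"
+#     operator: "Equal"
+#     value: "win1809"
+#     effect: "NoSchedule"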
+WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}" diff --git a/vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD b/vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD new file mode 100644 index 000000000..9c205985e --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/gci/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") +load("@io_k8s_repo_infra//defs:pkg.bzl", "pkg_tar") +load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup") + +go_test( + name = "go_default_test", + srcs = [ + "apiserver_manifest_test.go", + "configure_helper_test.go", + ], + data = [ + ":scripts-test-data", + "//cluster/gce/manifests", + ], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +# Having the COS code from the GCE cluster deploy hosted with the release is +# useful for GKE. This list should match the list in +# kubernetes/release/lib/releaselib.sh. +release_filegroup( + name = "gcs-release-artifacts", + srcs = [ + "configure.sh", + "master.yaml", + "node.yaml", + "shutdown.sh", + ], + visibility = ["//visibility:public"], +) + +pkg_tar( + name = "gci-trusty-manifests", + srcs = glob(["gke-internal-configure-helper.sh"]), + files = { + "//cluster/gce/gci/mounter": "gci-mounter", + "configure-helper.sh": "gci-configure-helper.sh", + "health-monitor.sh": "health-monitor.sh", + }, + mode = "0755", + strip_prefix = ".", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cluster/gce/gci/mounter:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "scripts-test-data", + srcs = [ + "configure-helper.sh", + ], +) diff --git a/vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS b/vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS new file mode 100644 index 000000000..652ffcee3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/gci/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dchen1107 +- filbranden +- yguo0905 diff --git a/vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh b/vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh new file mode 100755 index 000000000..11dfd7b85 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/gci/helper.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for GCI distro + +# Creates the GCI specific metadata files if they do not exit. +# Assumed var +# KUBE_TEMP +function ensure-gci-metadata-files { + if [[ ! -f "${KUBE_TEMP}/gci-update.txt" ]]; then + echo -n "update_disabled" > "${KUBE_TEMP}/gci-update.txt" + fi + if [[ ! 
-f "${KUBE_TEMP}/gci-ensure-gke-docker.txt" ]]; then + echo -n "true" > "${KUBE_TEMP}/gci-ensure-gke-docker.txt" + fi + if [[ ! -f "${KUBE_TEMP}/gci-docker-version.txt" ]]; then + echo -n "${GCI_DOCKER_VERSION:-}" > "${KUBE_TEMP}/gci-docker-version.txt" + fi +} diff --git a/vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh b/vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh new file mode 100755 index 000000000..04225afc3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/gci/master-helper.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for GCI distro +source "${KUBE_ROOT}/cluster/gce/gci/helper.sh" + +# create-master-instance creates the master instance. If called with +# an argument, the argument is used as the name to a reserved IP +# address for the master. (In the case of upgrade/repair, we re-use +# the same IP.) +# +# It requires a whole slew of assumed variables, partially due to to +# the call to write-master-env. Listing them would be rather +# futile. Instead, we list the required calls to ensure any additional +# +# variables are set: +# ensure-temp-dir +# detect-project +# get-bearer-token +function create-master-instance { + local address="" + [[ -n ${1:-} ]] && address="${1}" + + write-master-env + ensure-gci-metadata-files + create-master-instance-internal "${MASTER_NAME}" "${address}" +} + +function replicate-master-instance() { + local existing_master_zone="${1}" + local existing_master_name="${2}" + local existing_master_replicas="${3}" + + local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)" + # Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering. 
+ kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")" + kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")" + + # Substitute INITIAL_ETCD_CLUSTER_STATE + kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER_STATE")" + kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER_STATE: 'existing'")" + + ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")" + ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")" + create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}" + + kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")" + kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")" + kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")" + kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")" + + ETCD_APISERVER_CA_KEY="$(echo "${kube_env}" | grep "ETCD_APISERVER_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")" + ETCD_APISERVER_CA_CERT="$(echo "${kube_env}" | grep "ETCD_APISERVER_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")" + create-etcd-apiserver-certs "etcd-${REPLICA_NAME}" "${REPLICA_NAME}" "${ETCD_APISERVER_CA_CERT}" "${ETCD_APISERVER_CA_KEY}" + + kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_SERVER_KEY")" + kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_SERVER_KEY: '${ETCD_APISERVER_SERVER_KEY_BASE64}'")" + kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_SERVER_CERT")" + kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_SERVER_CERT: '${ETCD_APISERVER_SERVER_CERT_BASE64}'")" + kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_CLIENT_KEY")" + kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_CLIENT_KEY: '${ETCD_APISERVER_CLIENT_KEY_BASE64}'")" + kube_env="$(echo "${kube_env}" | grep -v "ETCD_APISERVER_CLIENT_CERT")" + kube_env="$(echo -e "${kube_env}\nETCD_APISERVER_CLIENT_CERT: '${ETCD_APISERVER_CLIENT_CERT_BASE64}'")" + + echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml + get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt" + get-metadata "${existing_master_zone}" "${existing_master_name}" gci-update-strategy > "${KUBE_TEMP}/gci-update.txt" + get-metadata "${existing_master_zone}" "${existing_master_name}" gci-ensure-gke-docker > "${KUBE_TEMP}/gci-ensure-gke-docker.txt" + get-metadata "${existing_master_zone}" "${existing_master_name}" gci-docker-version > "${KUBE_TEMP}/gci-docker-version.txt" + get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml" + get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-location > "${KUBE_TEMP}/cluster-location.txt" + + create-master-instance-internal "${REPLICA_NAME}" +} + + +function create-master-instance-internal() { + local gcloud="gcloud" + local retries=5 + local sleep_sec=10 + if [[ "${MASTER_SIZE##*-}" -ge 64 ]]; then # remove everything up to last dash (inclusive) + # Workaround for #55777 + retries=30 + sleep_sec=60 + fi + + local -r master_name="${1}" + local -r address="${2:-}" + + local preemptible_master="" + if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then + preemptible_master="--preemptible --maintenance-policy TERMINATE" + fi + + local enable_ip_aliases + if [[ "${NODE_IPAM_MODE:-}" == "CloudAllocator" ]]; then + enable_ip_aliases=true + else + enable_ip_aliases=false + fi + + local network=$(make-gcloud-network-argument \ + "${NETWORK_PROJECT}" 
"${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \ + "${address:-}" "${enable_ip_aliases:-}" "${IP_ALIAS_SIZE:-}") + + local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml" + metadata="${metadata},kubelet-config=${KUBE_TEMP}/master-kubelet-config.yaml" + metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/gci/master.yaml" + metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh" + metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt" + metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt" + metadata="${metadata},gci-update-strategy=${KUBE_TEMP}/gci-update.txt" + metadata="${metadata},gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt" + metadata="${metadata},gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt" + metadata="${metadata},kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml" + metadata="${metadata},cluster-location=${KUBE_TEMP}/cluster-location.txt" + metadata="${metadata},${MASTER_EXTRA_METADATA}" + + local disk="name=${master_name}-pd" + disk="${disk},device-name=master-pd" + disk="${disk},mode=rw" + disk="${disk},boot=no" + disk="${disk},auto-delete=no" + + for attempt in $(seq 1 ${retries}); do + if result=$(${gcloud} compute instances create "${master_name}" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${MASTER_IMAGE_PROJECT}" \ + --image "${MASTER_IMAGE}" \ + --tags "${MASTER_TAG}" \ + --scopes "storage-ro,compute-rw,monitoring,logging-write" \ + --metadata-from-file "${metadata}" \ + --disk "${disk}" \ + --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \ + ${MASTER_MIN_CPU_ARCHITECTURE:+"--min-cpu-platform=${MASTER_MIN_CPU_ARCHITECTURE}"} \ + ${preemptible_master} \ + ${network} 2>&1); then + echo "${result}" >&2 + return 0 + else + echo "${result}" >&2 + if [[ ! "${result}" =~ "try again later" ]]; then + echo "Failed to create master instance due to non-retryable error" >&2 + return 1 + fi + sleep $sleep_sec + fi + done + + echo "Failed to create master instance despite ${retries} attempts" >&2 + return 1 +} + +function get-metadata() { + local zone="${1}" + local name="${2}" + local key="${3}" + gcloud compute ssh "${name}" \ + --project "${PROJECT}" \ + --zone "${zone}" \ + --command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null +} diff --git a/vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh b/vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh new file mode 100755 index 000000000..6cd72ec8c --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/gci/node-helper.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# A library of helper functions and constant for GCI distro +source "${KUBE_ROOT}/cluster/gce/gci/helper.sh" + +function get-node-instance-metadata-from-file { + local metadata="" + metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml," + metadata+="kubelet-config=${KUBE_TEMP}/node-kubelet-config.yaml," + metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml," + metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh," + metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt," + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt," + metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt," + metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt," + metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt," + metadata+="shutdown-script=${KUBE_ROOT}/cluster/gce/gci/shutdown.sh," + metadata+="${NODE_EXTRA_METADATA}" + echo "${metadata}" +} + +# Assumed vars: +# scope_flags +# Parameters: +# $1: template name (required). +function create-linux-node-instance-template { + local template_name="$1" + ensure-gci-metadata-files + # shellcheck disable=2154 # 'scope_flags' is assigned by upstream + create-node-template "${template_name}" "${scope_flags[*]}" "$(get-node-instance-metadata-from-file)" "" "linux" +} diff --git a/vendor/k8s.io/kubernetes/cluster/gce/util.sh b/vendor/k8s.io/kubernetes/cluster/gce/util.sh new file mode 100755 index 000000000..b8ea00988 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/util.sh @@ -0,0 +1,3521 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constant for the local config. + +# Use the config file specified in $KUBE_CONFIG_FILE, or default to +# config-default.sh. +readonly GCE_MAX_LOCAL_SSD=8 + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}" +source "${KUBE_ROOT}/cluster/common.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +if [[ "${NODE_OS_DISTRIBUTION}" == "gci" || "${NODE_OS_DISTRIBUTION}" == "ubuntu" || "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then + source "${KUBE_ROOT}/cluster/gce/${NODE_OS_DISTRIBUTION}/node-helper.sh" +else + echo "Cannot operate on cluster using node os distro: ${NODE_OS_DISTRIBUTION}" >&2 + exit 1 +fi + +source "${KUBE_ROOT}/cluster/gce/windows/node-helper.sh" + +if [[ "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" || "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then + source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh" +else + echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2 + exit 1 +fi + +if [[ ${NODE_LOCAL_SSDS:-} -ge 1 ]] && [[ ! 
-z ${NODE_LOCAL_SSDS_EXT:-} ]] ; then + echo -e "${color_red}Local SSD: Only one of NODE_LOCAL_SSDS and NODE_LOCAL_SSDS_EXT can be specified at once${color_norm}" >&2 + exit 2 +fi + +if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then + DEFAULT_GCI_PROJECT=google-containers + if [[ "${GCI_VERSION}" == "cos"* ]]; then + DEFAULT_GCI_PROJECT=cos-cloud + fi + MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}} + # If the master image is not set, we use the latest GCI image. + # Otherwise, we respect whatever is set by the user. + MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-${GCI_VERSION}} +fi + +# Sets node image based on the specified os distro. Currently this function only +# supports gci and debian. +# +# Requires: +# NODE_OS_DISTRIBUTION +# Sets: +# DEFAULT_GCI_PROJECT +# NODE_IMAGE +# NODE_IMAGE_PROJECT +function set-linux-node-image() { + if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then + DEFAULT_GCI_PROJECT=google-containers + if [[ "${GCI_VERSION}" == "cos"* ]]; then + DEFAULT_GCI_PROJECT=cos-cloud + fi + + # If the node image is not set, we use the latest GCI image. + # Otherwise, we respect whatever is set by the user. + NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}} + NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-${DEFAULT_GCI_PROJECT}} + fi +} + +# Requires: +# WINDOWS_NODE_OS_DISTRIBUTION +# Sets: +# WINDOWS_NODE_IMAGE_FAMILY +# WINDOWS_NODE_IMAGE_PROJECT +function set-windows-node-image() { + WINDOWS_NODE_IMAGE_PROJECT="windows-cloud" + if [[ "${WINDOWS_NODE_OS_DISTRIBUTION}" == "win2019" ]]; then + WINDOWS_NODE_IMAGE_FAMILY="windows-2019-core-for-containers" + elif [[ "${WINDOWS_NODE_OS_DISTRIBUTION}" == "win1809" ]]; then + WINDOWS_NODE_IMAGE_FAMILY="windows-1809-core-for-containers" + else + echo "Unknown WINDOWS_NODE_OS_DISTRIBUTION ${WINDOWS_NODE_OS_DISTRIBUTION}" >&2 + exit 1 + fi +} + +set-linux-node-image +set-windows-node-image + +# Verify cluster autoscaler configuration. +if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then + if [[ -z $AUTOSCALER_MIN_NODES ]]; then + echo "AUTOSCALER_MIN_NODES not set." + exit 1 + fi + if [[ -z $AUTOSCALER_MAX_NODES ]]; then + echo "AUTOSCALER_MAX_NODES not set." + exit 1 + fi +fi + +# These prefixes must not be prefixes of each other, so that they can be used to +# detect mutually exclusive sets of nodes. +NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-minion"} +WINDOWS_NODE_INSTANCE_PREFIX=${WINDOWS_NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-windows-node"} + +NODE_TAGS="${NODE_TAG}" + +ALLOCATE_NODE_CIDRS=true +PREEXISTING_NETWORK=false +PREEXISTING_NETWORK_MODE="" + +KUBE_PROMPT_FOR_UPDATE=${KUBE_PROMPT_FOR_UPDATE:-"n"} +# How long (in seconds) to wait for cluster initialization. +KUBE_CLUSTER_INITIALIZATION_TIMEOUT=${KUBE_CLUSTER_INITIALIZATION_TIMEOUT:-300} + +function join_csv() { + local IFS=','; echo "$*"; +} + +# This function returns the first string before the comma +function split_csv() { + echo "$*" | cut -d',' -f1 +} + +# Verify prereqs +function verify-prereqs() { + local cmd + + # we use openssl to generate certs + kube::util::test_openssl_installed + + # ensure a version supported by easyrsa is installed + if [ "$(openssl version | cut -d\ -f1)" == "LibreSSL" ]; then + echo "LibreSSL is not supported. 
Please ensure openssl points to an OpenSSL binary" + if [ "$(uname -s)" == "Darwin" ]; then + echo 'On macOS we recommend using homebrew and adding "$(brew --prefix openssl)/bin" to your PATH' + fi + exit 1 + fi + + # we use gcloud to create the cluster, gsutil to stage binaries and data + for cmd in gcloud gsutil; do + if ! which "${cmd}" >/dev/null; then + local resp="n" + if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then + echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]" + read resp + fi + if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then + curl https://sdk.cloud.google.com | bash + fi + if ! which "${cmd}" >/dev/null; then + echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud " >&2 + echo "SDK can be downloaded from https://cloud.google.com/sdk/." >&2 + exit 1 + fi + fi + done + update-or-verify-gcloud +} + +# Use the gcloud defaults to find the project. If it is already set in the +# environment then go with that. +# +# Vars set: +# PROJECT +# NETWORK_PROJECT +# PROJECT_REPORTED +function detect-project() { + if [[ -z "${PROJECT-}" ]]; then + PROJECT=$(gcloud config list project --format 'value(core.project)') + fi + + NETWORK_PROJECT=${NETWORK_PROJECT:-${PROJECT}} + + if [[ -z "${PROJECT-}" ]]; then + echo "Could not detect Google Cloud Platform project. Set the default project using " >&2 + echo "'gcloud config set project '" >&2 + exit 1 + fi + if [[ -z "${PROJECT_REPORTED-}" ]]; then + echo "Project: ${PROJECT}" >&2 + echo "Network Project: ${NETWORK_PROJECT}" >&2 + echo "Zone: ${ZONE}" >&2 + PROJECT_REPORTED=true + fi +} + +# Use gsutil to get the md5 hash for a particular tar +function gsutil_get_tar_md5() { + # location_tar could be local or in the cloud + # local tar_location example ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz + # cloud tar_location example gs://kubernetes-staging-PROJECT/kubernetes-devel/kubernetes-server-linux-amd64.tar.gz + local -r tar_location=$1 + #parse the output and return the md5 hash + #the sed command at the end removes whitespace + local -r tar_md5=$(gsutil hash -h -m ${tar_location} 2>/dev/null | grep "Hash (md5):" | awk -F ':' '{print $2}' | sed 's/^[[:space:]]*//g') + echo "${tar_md5}" +} + +# Copy a release tar and its accompanying hash. 
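+# Arguments, in order: the GCS staging path, the gs:// URL of the uploaded
+# object (used below to make it world-readable), the local tarball path, and
+# its sha1 hash; see the local -r assignments at the top of the function body.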
+function copy-to-staging() { + local -r staging_path=$1 + local -r gs_url=$2 + local -r tar=$3 + local -r hash=$4 + local -r basename_tar=$(basename ${tar}) + + #check whether this tar alread exists and has the same hash + #if it matches, then don't bother uploading it again + + #remote_tar_md5 checks the remote location for the existing tarball and its md5 + #staging_path example gs://kubernetes-staging-PROJECT/kubernetes-devel + #basename_tar example kubernetes-server-linux-amd64.tar.gz + local -r remote_tar_md5=$(gsutil_get_tar_md5 "${staging_path}/${basename_tar}") + if [[ -n ${remote_tar_md5} ]]; then + #local_tar_md5 checks the remote location for the existing tarball and its md5 hash + #tar example ./_output/release-tars/kubernetes-server-linux-amd64.tar.gz + local -r local_tar_md5=$(gsutil_get_tar_md5 "${tar}") + if [[ "${remote_tar_md5}" == "${local_tar_md5}" ]]; then + echo "+++ ${basename_tar} uploaded earlier, cloud and local file md5 match (md5 = ${local_tar_md5})" + return 0 + fi + fi + + echo "${hash}" > "${tar}.sha1" + gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}" + gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1 + echo "+++ ${basename_tar} uploaded (sha1 = ${hash})" +} + + +# Given the cluster zone, return the list of regional GCS release +# bucket suffixes for the release in preference order. GCS doesn't +# give us an API for this, so we hardcode it. +# +# Assumed vars: +# RELEASE_REGION_FALLBACK +# REGIONAL_KUBE_ADDONS +# ZONE +# Vars set: +# PREFERRED_REGION +function set-preferred-region() { + case ${ZONE} in + asia-*) + PREFERRED_REGION=("asia" "us" "eu") + ;; + europe-*) + PREFERRED_REGION=("eu" "us" "asia") + ;; + *) + PREFERRED_REGION=("us" "eu" "asia") + ;; + esac + + if [[ "${RELEASE_REGION_FALLBACK}" != "true" ]]; then + PREFERRED_REGION=( "${PREFERRED_REGION[0]}" ) + fi +} + +# Take the local tar files and upload them to Google Storage. They will then be +# downloaded by the master as part of the start up script for the master. 
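+# The staging location is derived from the project, for example (illustrative):
+# a bucket named gs://kubernetes-staging-<first 10 chars of the project md5>
+# (plus a "-eu"/"-asia" suffix for non-US regions) with an object prefix of
+# ${INSTANCE_PREFIX}-devel, as computed in the function body below.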
+# +# Assumed vars: +# PROJECT +# SERVER_BINARY_TAR +# NODE_BINARY_TAR (optional) +# KUBE_MANIFESTS_TAR +# ZONE +# Vars set: +# SERVER_BINARY_TAR_URL +# SERVER_BINARY_TAR_HASH +# NODE_BINARY_TAR_URL +# NODE_BINARY_TAR_HASH +# KUBE_MANIFESTS_TAR_URL +# KUBE_MANIFESTS_TAR_HASH +function upload-tars() { + SERVER_BINARY_TAR_URL= + SERVER_BINARY_TAR_HASH= + NODE_BINARY_TAR_URL= + NODE_BINARY_TAR_HASH= + KUBE_MANIFESTS_TAR_URL= + KUBE_MANIFESTS_TAR_HASH= + + local project_hash + if which md5 > /dev/null 2>&1; then + project_hash=$(md5 -q -s "$PROJECT") + else + project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }') + fi + + # This requires 1 million projects before the probability of collision is 50% + # that's probably good enough for now :P + project_hash=${project_hash:0:10} + + set-preferred-region + + if [[ "${ENABLE_DOCKER_REGISTRY_CACHE:-}" == "true" ]]; then + DOCKER_REGISTRY_MIRROR_URL="https://mirror.gcr.io" + fi + + SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}") + + if [[ -n "${NODE_BINARY_TAR:-}" ]]; then + NODE_BINARY_TAR_HASH=$(sha1sum-file "${NODE_BINARY_TAR}") + fi + if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then + KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}") + fi + + local server_binary_tar_urls=() + local node_binary_tar_urls=() + local kube_manifest_tar_urls=() + + for region in "${PREFERRED_REGION[@]}"; do + suffix="-${region}" + if [[ "${suffix}" == "-us" ]]; then + suffix="" + fi + local staging_bucket="gs://kubernetes-staging-${project_hash}${suffix}" + + # Ensure the buckets are created + if ! gsutil ls "${staging_bucket}" >/dev/null; then + echo "Creating ${staging_bucket}" + gsutil mb -l "${region}" "${staging_bucket}" + fi + + local staging_path="${staging_bucket}/${INSTANCE_PREFIX}-devel" + + echo "+++ Staging tars to Google Storage: ${staging_path}" + local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}" + copy-to-staging "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}" + + if [[ -n "${NODE_BINARY_TAR:-}" ]]; then + local node_binary_gs_url="${staging_path}/${NODE_BINARY_TAR##*/}" + copy-to-staging "${staging_path}" "${node_binary_gs_url}" "${NODE_BINARY_TAR}" "${NODE_BINARY_TAR_HASH}" + fi + + # Convert from gs:// URL to an https:// URL + server_binary_tar_urls+=("${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}") + if [[ -n "${NODE_BINARY_TAR:-}" ]]; then + node_binary_tar_urls+=("${node_binary_gs_url/gs:\/\//https://storage.googleapis.com/}") + fi + if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then + local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}" + copy-to-staging "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}" + # Convert from gs:// URL to an https:// URL + kube_manifests_tar_urls+=("${kube_manifests_gs_url/gs:\/\//https://storage.googleapis.com/}") + fi + done + + SERVER_BINARY_TAR_URL=$(join_csv "${server_binary_tar_urls[@]}") + if [[ -n "${NODE_BINARY_TAR:-}" ]]; then + NODE_BINARY_TAR_URL=$(join_csv "${node_binary_tar_urls[@]}") + fi + if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then + KUBE_MANIFESTS_TAR_URL=$(join_csv "${kube_manifests_tar_urls[@]}") + fi +} + +# Detect Linux and Windows nodes created in the instance group. 
+# +# Assumed vars: +# NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX +# Vars set: +# NODE_NAMES +# INSTANCE_GROUPS +# WINDOWS_NODE_NAMES +# WINDOWS_INSTANCE_GROUPS +function detect-node-names() { + detect-project + INSTANCE_GROUPS=() + INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \ + --project "${PROJECT}" \ + --filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ + --format='value(name)' || true)) + WINDOWS_INSTANCE_GROUPS=() + WINDOWS_INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \ + --project "${PROJECT}" \ + --filter "name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \ + --format='value(name)' || true)) + + NODE_NAMES=() + if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then + for group in "${INSTANCE_GROUPS[@]}"; do + NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \ + "${group}" --zone "${ZONE}" --project "${PROJECT}" \ + --format='value(instance)')) + done + fi + # Add heapster node name to the list too (if it exists). + if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster") + fi + WINDOWS_NODE_NAMES=() + if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then + for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do + WINDOWS_NODE_NAMES+=($(gcloud compute instance-groups managed \ + list-instances "${group}" --zone "${ZONE}" --project "${PROJECT}" \ + --format='value(instance)')) + done + fi + + echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2 + echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2 +} + +# Detect the information about the minions +# +# Assumed vars: +# ZONE +# Vars set: +# NODE_NAMES +# KUBE_NODE_IP_ADDRESSES (array) +function detect-nodes() { + detect-project + detect-node-names + KUBE_NODE_IP_ADDRESSES=() + for (( i=0; i<${#NODE_NAMES[@]}; i++)); do + local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \ + "${NODE_NAMES[$i]}" --format='value(networkInterfaces[0].accessConfigs[0].natIP)') + if [[ -z "${node_ip-}" ]] ; then + echo "Did not find ${NODE_NAMES[$i]}" >&2 + else + echo "Found ${NODE_NAMES[$i]} at ${node_ip}" + KUBE_NODE_IP_ADDRESSES+=("${node_ip}") + fi + done + if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then + echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2 + exit 1 + fi +} + +# Detect the IP for the master +# +# Assumed vars: +# MASTER_NAME +# ZONE +# REGION +# Vars set: +# KUBE_MASTER +# KUBE_MASTER_IP +function detect-master() { + detect-project + KUBE_MASTER=${MASTER_NAME} + echo "Trying to find master named '${MASTER_NAME}'" >&2 + if [[ -z "${KUBE_MASTER_IP-}" ]]; then + local master_address_name="${MASTER_NAME}-ip" + echo "Looking for address '${master_address_name}'" >&2 + if ! KUBE_MASTER_IP=$(gcloud compute addresses describe "${master_address_name}" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') || \ + [[ -z "${KUBE_MASTER_IP-}" ]]; then + echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2 + exit 1 + fi + fi + echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)" >&2 +} + +function load-or-gen-kube-bearertoken() { + if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then + get-kubeconfig-bearertoken + fi + if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then + gen-kube-bearertoken + fi +} + +# Figure out which binary use on the server and assure it is available. +# If KUBE_VERSION is specified use binaries specified by it, otherwise +# use local dev binaries. 
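+# Illustrative examples (the exact version formats are assumptions; see
+# KUBE_RELEASE_VERSION_REGEX / KUBE_CI_VERSION_REGEX in the sourced scripts):
+# a release tag such as KUBE_VERSION=v1.13.5 is fetched from the public
+# kubernetes-release bucket, a CI build version from kubernetes-release-dev,
+# and anything matching neither regex aborts.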
+# +# Assumed vars: +# KUBE_VERSION +# KUBE_RELEASE_VERSION_REGEX +# KUBE_CI_VERSION_REGEX +# Vars set: +# KUBE_TAR_HASH +# SERVER_BINARY_TAR_URL +# SERVER_BINARY_TAR_HASH +function tars_from_version() { + local sha1sum="" + if which sha1sum >/dev/null 2>&1; then + sha1sum="sha1sum" + else + sha1sum="shasum -a1" + fi + + if [[ -z "${KUBE_VERSION-}" ]]; then + find-release-tars + upload-tars + elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then + SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz" + # TODO: Clean this up. + KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}" + KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}') + elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then + SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz" + # TODO: Clean this up. + KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}" + KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}') + else + echo "Version doesn't match regexp" >&2 + exit 1 + fi + if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then + echo "Failure trying to curl release .sha1" + fi + + if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then + echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2 + exit 1 + fi +} + +# Reads kube-env metadata from master +# +# Assumed vars: +# KUBE_MASTER +# PROJECT +# ZONE +function get-master-env() { + # TODO(zmerlynn): Make this more reliable with retries. + gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \ + "curl --fail --silent -H 'Metadata-Flavor: Google' \ + 'http://metadata/computeMetadata/v1/instance/attributes/kube-env'" 2>/dev/null + gcloud compute --project ${PROJECT} ssh --zone ${ZONE} ${KUBE_MASTER} --command \ + "curl --fail --silent -H 'Metadata-Flavor: Google' \ + 'http://metadata/computeMetadata/v1/instance/attributes/kube-master-certs'" 2>/dev/null +} + +# Quote something appropriate for a yaml string. +# +# TODO(zmerlynn): Note that this function doesn't so much "quote" as +# "strip out quotes", and we really should be using a YAML library for +# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH* +function yaml-quote { + echo "'$(echo "${@:-}" | sed -e "s/'/''/g")'" +} + +# Writes the cluster location into a temporary file. +# Assumed vars +# ZONE +function write-cluster-location { + cat >"${KUBE_TEMP}/cluster-location.txt" << EOF +${ZONE} +EOF +} + +# Writes the cluster name into a temporary file. +# Assumed vars +# CLUSTER_NAME +function write-cluster-name { + cat >"${KUBE_TEMP}/cluster-name.txt" << EOF +${CLUSTER_NAME} +EOF +} + +function write-master-env { + # If the user requested that the master be part of the cluster, set the + # environment variable to program the master kubelet to register itself. 
+ if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" && -z "${KUBELET_APISERVER:-}" ]]; then + KUBELET_APISERVER="${MASTER_NAME}" + fi + if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then + KUBERNETES_MASTER_NAME="${MASTER_NAME}" + fi + + construct-linux-kubelet-flags true + build-linux-kube-env true "${KUBE_TEMP}/master-kube-env.yaml" + build-kubelet-config true "linux" "${KUBE_TEMP}/master-kubelet-config.yaml" + build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml" +} + +function write-linux-node-env { + if [[ -z "${KUBERNETES_MASTER_NAME:-}" ]]; then + KUBERNETES_MASTER_NAME="${MASTER_NAME}" + fi + + construct-linux-kubelet-flags false + build-linux-kube-env false "${KUBE_TEMP}/node-kube-env.yaml" + build-kubelet-config false "linux" "${KUBE_TEMP}/node-kubelet-config.yaml" +} + +function write-windows-node-env { + construct-windows-kubelet-flags + construct-windows-kubeproxy-flags + build-windows-kube-env "${KUBE_TEMP}/windows-node-kube-env.yaml" + build-kubelet-config false "windows" "${KUBE_TEMP}/windows-node-kubelet-config.yaml" +} + +function build-linux-node-labels { + local master=$1 + local node_labels="" + if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then + # Add kube-proxy daemonset label to node to avoid situation during cluster + # upgrade/downgrade when there are two instances of kube-proxy running on a node. + # TODO(liggitt): drop beta.kubernetes.io/kube-proxy-ds-ready in 1.16 + node_labels="node.kubernetes.io/kube-proxy-ds-ready=true,beta.kubernetes.io/kube-proxy-ds-ready=true" + fi + if [[ -n "${NODE_LABELS:-}" ]]; then + node_labels="${node_labels:+${node_labels},}${NODE_LABELS}" + fi + if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then + node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}" + fi + echo $node_labels +} + +function build-windows-node-labels { + local node_labels="" + if [[ -n "${WINDOWS_NODE_LABELS:-}" ]]; then + node_labels="${node_labels:+${node_labels},}${WINDOWS_NODE_LABELS}" + fi + if [[ -n "${WINDOWS_NON_MASTER_NODE_LABELS:-}" ]]; then + node_labels="${node_labels:+${node_labels},}${WINDOWS_NON_MASTER_NODE_LABELS}" + fi + echo $node_labels +} + +# yaml-map-string-stringarray converts the encoded structure to yaml format, and echoes the result +# under the provided name. If the encoded structure is empty, echoes nothing. 
+# 1: name to be output in yaml +# 2: encoded map-string-string (which may contain duplicate keys - resulting in map-string-stringarray) +# 3: key-value separator (defaults to ':') +# 4: item separator (defaults to ',') +function yaml-map-string-stringarray { + declare -r name="${1}" + declare -r encoded="${2}" + declare -r kv_sep="${3:-:}" + declare -r item_sep="${4:-,}" + + declare -a pairs # indexed array + declare -A map # associative array + IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep + for pair in "${pairs[@]}"; do + declare key + declare value + IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep + map[$key]="${map[$key]+${map[$key]}${item_sep}}${value}" # append values from duplicate keys + done + # only output if there is a non-empty map + if [[ ${#map[@]} -gt 0 ]]; then + echo "${name}:" + for k in "${!map[@]}"; do + echo " ${k}:" + declare -a values + IFS="${item_sep}" read -ra values <<<"${map[$k]}" + for val in "${values[@]}"; do + # declare across two lines so errexit can catch failures + declare v + v=$(yaml-quote "${val}") + echo " - ${v}" + done + done + fi +} + +# yaml-map-string-string converts the encoded structure to yaml format, and echoes the result +# under the provided name. If the encoded structure is empty, echoes nothing. +# 1: name to be output in yaml +# 2: encoded map-string-string (no duplicate keys) +# 3: bool, whether to yaml-quote the value string in the output (defaults to true) +# 4: key-value separator (defaults to ':') +# 5: item separator (defaults to ',') +function yaml-map-string-string { + declare -r name="${1}" + declare -r encoded="${2}" + declare -r quote_val_string="${3:-true}" + declare -r kv_sep="${4:-:}" + declare -r item_sep="${5:-,}" + + declare -a pairs # indexed array + declare -A map # associative array + IFS="${item_sep}" read -ra pairs <<<"${encoded}" # split on item_sep # TODO(mtaufen): try quoting this too + for pair in "${pairs[@]}"; do + declare key + declare value + IFS="${kv_sep}" read -r key value <<<"${pair}" # split on kv_sep + map[$key]="${value}" # add to associative array + done + # only output if there is a non-empty map + if [[ ${#map[@]} -gt 0 ]]; then + echo "${name}:" + for k in "${!map[@]}"; do + if [[ "${quote_val_string}" == "true" ]]; then + # declare across two lines so errexit can catch failures + declare v + v=$(yaml-quote "${map[$k]}") + echo " ${k}: ${v}" + else + echo " ${k}: ${map[$k]}" + fi + done + fi +} + +# Returns kubelet flags used on both Linux and Windows nodes. +function construct-common-kubelet-flags { + local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}" + flags+=" --cloud-provider=gce" + # TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it? + if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then + flags+=" --rotate-certificates=true" + fi + if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then + flags+=" --max-pods=${MAX_PODS_PER_NODE}" + fi + echo $flags +} + +# Sets KUBELET_ARGS with the kubelet flags for Linux nodes. 
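+# Called as "construct-linux-kubelet-flags true" when building the master env
+# and "construct-linux-kubelet-flags false" for regular nodes (see
+# write-master-env / write-linux-node-env above).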
+# $1: if 'true', we're rendering flags for a master, else a node +function construct-linux-kubelet-flags { + local master="$1" + local flags="$(construct-common-kubelet-flags)" + flags+=" --allow-privileged=true" + # Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh + flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter" + flags+=" --experimental-check-node-capabilities-before-mount=true" + # Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved) + flags+=" --cert-dir=/var/lib/kubelet/pki/" + # Configure the directory that the Kubelet should use to store dynamic config checkpoints + flags+=" --dynamic-config-dir=/var/lib/kubelet/dynamic-config" + + + if [[ "${master}" == "true" ]]; then + flags+=" ${MASTER_KUBELET_TEST_ARGS:-}" + if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then + #TODO(mikedanese): allow static pods to start before creating a client + #flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig" + #flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig" + flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig" + flags+=" --register-schedulable=false" + fi + else # For nodes + flags+=" ${NODE_KUBELET_TEST_ARGS:-}" + flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig" + flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig" + fi + # Network plugin + if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then + flags+=" --cni-bin-dir=/home/kubernetes/bin" + if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then + # Calico uses CNI always. + # Note that network policy won't work for master node. + if [[ "${master}" == "true" ]]; then + flags+=" --network-plugin=${NETWORK_PROVIDER}" + else + flags+=" --network-plugin=cni" + fi + else + # Otherwise use the configured value. + flags+=" --network-plugin=${NETWORK_PROVIDER}" + + fi + fi + if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then + flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}" + fi + flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}" + local node_labels="$(build-linux-node-labels ${master})" + if [[ -n "${node_labels:-}" ]]; then + flags+=" --node-labels=${node_labels}" + fi + if [[ -n "${NODE_TAINTS:-}" ]]; then + flags+=" --register-with-taints=${NODE_TAINTS}" + fi + if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then + flags+=" --container-runtime=${CONTAINER_RUNTIME}" + fi + if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then + flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}" + fi + + KUBELET_ARGS="${flags}" +} + +# Sets KUBELET_ARGS with the kubelet flags for Windows nodes. +function construct-windows-kubelet-flags { + local flags="$(construct-common-kubelet-flags)" + + # Note: NODE_KUBELET_TEST_ARGS is empty in typical kube-up runs. + flags+=" ${NODE_KUBELET_TEST_ARGS:-}" + + local node_labels="$(build-windows-node-labels)" + if [[ -n "${node_labels:-}" ]]; then + flags+=" --node-labels=${node_labels}" + fi + + # Concatenate common and windows-only node taints and apply them. + local node_taints="${NODE_TAINTS:-}" + if [[ -n "${node_taints}" && -n "${WINDOWS_NODE_TAINTS:-}" ]]; then + node_taints+=":${WINDOWS_NODE_TAINTS}" + else + node_taints="${WINDOWS_NODE_TAINTS:-}" + fi + if [[ -n "${node_taints}" ]]; then + flags+=" --register-with-taints=${node_taints}" + fi + + # Many of these flags were adapted from + # https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/start-kubelet.ps1. 
+ flags+=" --config=${WINDOWS_KUBELET_CONFIG_FILE}" + + # Path to a kubeconfig file that will be used to get client certificate for + # kubelet. If the file specified by --kubeconfig does not exist, the bootstrap + # kubeconfig is used to request a client certificate from the API server. On + # success, a kubeconfig file referencing the generated client certificate and + # key is written to the path specified by --kubeconfig. The client certificate + # and key file will be stored in the directory pointed by --cert-dir. + # + # See also: + # https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/ + flags+=" --bootstrap-kubeconfig=${WINDOWS_BOOTSTRAP_KUBECONFIG_FILE}" + flags+=" --kubeconfig=${WINDOWS_KUBECONFIG_FILE}" + + # The directory where the TLS certs are located. + flags+=" --cert-dir=${WINDOWS_PKI_DIR}" + + flags+=" --network-plugin=cni" + flags+=" --cni-bin-dir=${WINDOWS_CNI_DIR}" + flags+=" --cni-conf-dir=${WINDOWS_CNI_CONFIG_DIR}" + flags+=" --pod-manifest-path=${WINDOWS_MANIFESTS_DIR}" + + # Windows images are large and we don't have gcr mirrors yet. Allow longer + # pull progress deadline. + flags+=" --image-pull-progress-deadline=5m" + flags+=" --enable-debugging-handlers=true" + + # Configure kubelet to run as a windows service. + flags+=" --windows-service=true" + + # TODO(mtaufen): Configure logging for kubelet running as a service. I haven't + # been able to figure out how to direct stdout/stderr into log files when + # configuring it to run via sc.exe, so we just manually override logging + # config here. + flags+=" --log-file=${WINDOWS_LOGS_DIR}\kubelet.log" + # klog sets this to true internally, so need to override to false so we + # actually log to the file + flags+=" --logtostderr=false" + + # Configure flags with explicit empty string values. We can't escape + # double-quotes, because they still break sc.exe after expansion in the + # binPath parameter, and single-quotes get parsed as characters instead of + # string delimiters. + flags+=" --resolv-conf=" + + # Both --cgroups-per-qos and --enforce-node-allocatable should be disabled on + # windows; the latter requires the former to be enabled to work. + flags+=" --cgroups-per-qos=false --enforce-node-allocatable=" + + # Turn off kernel memory cgroup notification. + flags+=" --experimental-kernel-memcg-notification=false" + + KUBELET_ARGS="${flags}" +} + +function construct-windows-kubeproxy-flags { + local flags="" + + # Use the same log level as the Kubelet during tests. + flags+=" ${KUBELET_TEST_LOG_LEVEL:-"--v=2"}" + + # Windows uses kernelspace proxymode + flags+=" --proxy-mode=kernelspace" + + # Configure kube-proxy to run as a windows service. + flags+=" --windows-service=true" + + # TODO(mtaufen): Configure logging for kube-proxy running as a service. + # I haven't been able to figure out how to direct stdout/stderr into log + # files when configuring it to run via sc.exe, so we just manually + # override logging config here. + flags+=" --log-file=${WINDOWS_LOGS_DIR}\kube-proxy.log" + + # klog sets this to true internally, so need to override to false + # so we actually log to the file + flags+=" --logtostderr=false" + + # Configure flags with explicit empty string values. We can't escape + # double-quotes, because they still break sc.exe after expansion in the + # binPath parameter, and single-quotes get parsed as characters instead + # of string delimiters. 
+ flags+=" --resource-container=" + + KUBEPROXY_ARGS="${flags}" +} + +# $1: if 'true', we're rendering config for a master, else a node +function build-kubelet-config { + local master="$1" + local os="$2" + local file="$3" + + rm -f "${file}" + { + print-common-kubelet-config + if [[ "${master}" == "true" ]]; then + print-master-kubelet-config + else + print-common-node-kubelet-config + if [[ "${os}" == "linux" ]]; then + print-linux-node-kubelet-config + elif [[ "${os}" == "windows" ]]; then + print-windows-node-kubelet-config + else + echo "Unknown OS ${os}" >&2 + exit 1 + fi + fi + } > "${file}" +} + +# cat the Kubelet config yaml in common between masters, linux nodes, and +# windows nodes +function print-common-kubelet-config { + declare quoted_dns_server_ip + declare quoted_dns_domain + quoted_dns_server_ip=$(yaml-quote "${DNS_SERVER_IP}") + if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then + quoted_dns_server_ip=$(yaml-quote "${LOCAL_DNS_IP}") + fi + quoted_dns_domain=$(yaml-quote "${DNS_DOMAIN}") + cat <$file <$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file </dev/null 2>&1; then + sha1sum "$1" | awk '{ print $1 }' + else + shasum -a1 "$1" | awk '{ print $1 }' + fi +} + +# Create certificate pairs for the cluster. +# $1: The public IP for the master. +# +# These are used for static cert distribution (e.g. static clustering) at +# cluster creation time. This will be obsoleted once we implement dynamic +# clustering. +# +# The following certificate pairs are created: +# +# - ca (the cluster's certificate authority) +# - server +# - kubelet +# - kubecfg (for kubectl) +# +# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate +# the certs that we need. +# +# Assumed vars +# KUBE_TEMP +# MASTER_NAME +# +# Vars set: +# CERT_DIR +# CA_CERT_BASE64 +# MASTER_CERT_BASE64 +# MASTER_KEY_BASE64 +# KUBELET_CERT_BASE64 +# KUBELET_KEY_BASE64 +# KUBECFG_CERT_BASE64 +# KUBECFG_KEY_BASE64 +function create-certs { + local -r primary_cn="${1}" + + # Determine extra certificate names for master + local octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g')) + ((octets[3]+=1)) + local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g') + local sans="" + for extra in $@; do + if [[ -n "${extra}" ]]; then + sans="${sans}IP:${extra}," + fi + done + sans="${sans}IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}" + + echo "Generating certs for alternate-names: ${sans}" + + setup-easyrsa + PRIMARY_CN="${primary_cn}" SANS="${sans}" generate-certs + AGGREGATOR_PRIMARY_CN="${primary_cn}" AGGREGATOR_SANS="${sans}" generate-aggregator-certs + + # By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces. + # Note 'base64 -w0' doesn't work on Mac OS X, which has different flags. 
+ CA_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n') + CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n') + MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n') + MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n') + KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64 | tr -d '\r\n') + KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64 | tr -d '\r\n') + KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64 | tr -d '\r\n') + KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64 | tr -d '\r\n') + KUBEAPISERVER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kube-apiserver.crt" | base64 | tr -d '\r\n') + KUBEAPISERVER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kube-apiserver.key" | base64 | tr -d '\r\n') + + # Setting up an additional directory (beyond pki) is the simplest way to + # ensure we get a different CA pair to sign the proxy-client certs, and it + # lets us send the CA public key to the user apiserver to validate communication. + AGGREGATOR_CA_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" | base64 | tr -d '\r\n') + REQUESTHEADER_CA_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n') + PROXY_CLIENT_CERT_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" | base64 | tr -d '\r\n') + PROXY_CLIENT_KEY_BASE64=$(cat "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" | base64 | tr -d '\r\n') +} + +# Set up easy-rsa directory structure. +# +# Assumed vars +# KUBE_TEMP +# +# Vars set: +# CERT_DIR +# AGGREGATOR_CERT_DIR +function setup-easyrsa { + local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX") + # Note: This was heavily cribbed from make-ca-cert.sh + (set -x + cd "${KUBE_TEMP}" + curl -L -O --connect-timeout 20 --retry 6 --retry-delay 2 https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz + tar xzf easy-rsa.tar.gz + mkdir easy-rsa-master/kubelet + cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/kubelet + mkdir easy-rsa-master/aggregator + cp -r easy-rsa-master/easyrsa3/* easy-rsa-master/aggregator) &>${cert_create_debug_output} || true + CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3" + AGGREGATOR_CERT_DIR="${KUBE_TEMP}/easy-rsa-master/aggregator" + if [ ! -x "${CERT_DIR}/easyrsa" -o ! -x "${AGGREGATOR_CERT_DIR}/easyrsa" ]; then + # TODO(roberthbailey,porridge): add better error handling here, + # see https://github.com/kubernetes/kubernetes/issues/55229 + cat "${cert_create_debug_output}" >&2 + echo "=== Failed to setup easy-rsa: Aborting ===" >&2 + exit 2 + fi +} + +# Runs the easy RSA commands to generate certificate files.
+# The generated files are IN ${CERT_DIR} +# +# Assumed vars +# KUBE_TEMP +# MASTER_NAME +# CERT_DIR +# PRIMARY_CN: Primary canonical name +# SANS: Subject alternate names +# +# +function generate-certs { + local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX") + # Note: This was heavily cribbed from make-ca-cert.sh + (set -x + cd "${CERT_DIR}" + ./easyrsa init-pki + # this puts the cert into pki/ca.crt and the key into pki/private/ca.key + ./easyrsa --batch "--req-cn=${PRIMARY_CN}@$(date +%s)" build-ca nopass + ./easyrsa --subject-alt-name="${SANS}" build-server-full "${MASTER_NAME}" nopass + ./easyrsa build-client-full kube-apiserver nopass + + kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl" + + # make the config for the signer + echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json" + # create the kubelet client cert with the correct groups + echo '{"CN":"kubelet","names":[{"O":"system:nodes"}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare kubelet + mv "kubelet-key.pem" "pki/private/kubelet.key" + mv "kubelet.pem" "pki/issued/kubelet.crt" + rm -f "kubelet.csr" + + # Make a superuser client cert with subject "O=system:masters, CN=kubecfg" + ./easyrsa --dn-mode=org \ + --req-cn=kubecfg --req-org=system:masters \ + --req-c= --req-st= --req-city= --req-email= --req-ou= \ + build-client-full kubecfg nopass) &>${cert_create_debug_output} || true + local output_file_missing=0 + local output_file + for output_file in \ + "${CERT_DIR}/pki/private/ca.key" \ + "${CERT_DIR}/pki/ca.crt" \ + "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" \ + "${CERT_DIR}/pki/private/${MASTER_NAME}.key" \ + "${CERT_DIR}/pki/issued/kubelet.crt" \ + "${CERT_DIR}/pki/private/kubelet.key" \ + "${CERT_DIR}/pki/issued/kubecfg.crt" \ + "${CERT_DIR}/pki/private/kubecfg.key" \ + "${CERT_DIR}/pki/issued/kube-apiserver.crt" \ + "${CERT_DIR}/pki/private/kube-apiserver.key" + do + if [[ ! -s "${output_file}" ]]; then + echo "Expected file ${output_file} not created" >&2 + output_file_missing=1 + fi + done + if (( $output_file_missing )); then + # TODO(roberthbailey,porridge): add better error handling here, + # see https://github.com/kubernetes/kubernetes/issues/55229 + cat "${cert_create_debug_output}" >&2 + echo "=== Failed to generate master certificates: Aborting ===" >&2 + exit 2 + fi +} + +# Runs the easy RSA commands to generate aggregator certificate files. 
+# The generated files are in ${AGGREGATOR_CERT_DIR} +# +# Assumed vars +# KUBE_TEMP +# AGGREGATOR_MASTER_NAME +# AGGREGATOR_CERT_DIR +# AGGREGATOR_PRIMARY_CN: Primary canonical name +# AGGREGATOR_SANS: Subject alternate names +# +# +function generate-aggregator-certs { + local -r cert_create_debug_output=$(mktemp "${KUBE_TEMP}/cert_create_debug_output.XXX") + # Note: This was heavily cribbed from make-ca-cert.sh + (set -x + cd "${KUBE_TEMP}/easy-rsa-master/aggregator" + ./easyrsa init-pki + # this puts the cert into pki/ca.crt and the key into pki/private/ca.key + ./easyrsa --batch "--req-cn=${AGGREGATOR_PRIMARY_CN}@$(date +%s)" build-ca nopass + ./easyrsa --subject-alt-name="${AGGREGATOR_SANS}" build-server-full "${AGGREGATOR_MASTER_NAME}" nopass + ./easyrsa build-client-full aggregator-apiserver nopass + + kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl" + + # make the config for the signer + echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "ca-config.json" + # create the aggregator client cert with the correct groups + echo '{"CN":"aggregator","hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=pki/ca.crt -ca-key=pki/private/ca.key -config=ca-config.json - | "${CFSSLJSON_BIN}" -bare proxy-client + mv "proxy-client-key.pem" "pki/private/proxy-client.key" + mv "proxy-client.pem" "pki/issued/proxy-client.crt" + rm -f "proxy-client.csr" + + # Make a superuser client cert with subject "O=system:masters, CN=kubecfg" + ./easyrsa --dn-mode=org \ + --req-cn=proxy-clientcfg --req-org=system:aggregator \ + --req-c= --req-st= --req-city= --req-email= --req-ou= \ + build-client-full proxy-clientcfg nopass) &>${cert_create_debug_output} || true + local output_file_missing=0 + local output_file + for output_file in \ + "${AGGREGATOR_CERT_DIR}/pki/private/ca.key" \ + "${AGGREGATOR_CERT_DIR}/pki/ca.crt" \ + "${AGGREGATOR_CERT_DIR}/pki/issued/proxy-client.crt" \ + "${AGGREGATOR_CERT_DIR}/pki/private/proxy-client.key" + do + if [[ ! -s "${output_file}" ]]; then + echo "Expected file ${output_file} not created" >&2 + output_file_missing=1 + fi + done + if (( $output_file_missing )); then + # TODO(roberthbailey,porridge): add better error handling here, + # see https://github.com/kubernetes/kubernetes/issues/55229 + cat "${cert_create_debug_output}" >&2 + echo "=== Failed to generate aggregator certificates: Aborting ===" >&2 + exit 2 + fi +} + +# +# Using provided master env, extracts value from provided key. 
+# +# Args: +# $1 master env (kube-env of master; result of calling get-master-env) +# $2 env key to use +function get-env-val() { + local match=`(echo "${1}" | grep -E "^${2}:") || echo ""` + if [[ -z ${match} ]]; then + echo "" + fi + echo ${match} | cut -d : -f 2 | cut -d \' -f 2 +} + +# Load the master env by calling get-master-env, and extract important values +function parse-master-env() { + # Get required master env vars + local master_env=$(get-master-env) + KUBE_PROXY_TOKEN=$(get-env-val "${master_env}" "KUBE_PROXY_TOKEN") + NODE_PROBLEM_DETECTOR_TOKEN=$(get-env-val "${master_env}" "NODE_PROBLEM_DETECTOR_TOKEN") + CA_CERT_BASE64=$(get-env-val "${master_env}" "CA_CERT") + CA_KEY_BASE64=$(get-env-val "${master_env}" "CA_KEY") + KUBEAPISERVER_CERT_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_CERT") + KUBEAPISERVER_KEY_BASE64=$(get-env-val "${master_env}" "KUBEAPISERVER_KEY") + EXTRA_DOCKER_OPTS=$(get-env-val "${master_env}" "EXTRA_DOCKER_OPTS") + KUBELET_CERT_BASE64=$(get-env-val "${master_env}" "KUBELET_CERT") + KUBELET_KEY_BASE64=$(get-env-val "${master_env}" "KUBELET_KEY") + MASTER_CERT_BASE64=$(get-env-val "${master_env}" "MASTER_CERT") + MASTER_KEY_BASE64=$(get-env-val "${master_env}" "MASTER_KEY") + AGGREGATOR_CA_KEY_BASE64=$(get-env-val "${master_env}" "AGGREGATOR_CA_KEY") + REQUESTHEADER_CA_CERT_BASE64=$(get-env-val "${master_env}" "REQUESTHEADER_CA_CERT") + PROXY_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_CERT") + PROXY_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "PROXY_CLIENT_KEY") + ENABLE_LEGACY_ABAC=$(get-env-val "${master_env}" "ENABLE_LEGACY_ABAC") + ETCD_APISERVER_CA_KEY_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_CA_KEY") + ETCD_APISERVER_CA_CERT_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_CA_CERT") + ETCD_APISERVER_SERVER_KEY_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_SERVER_KEY") + ETCD_APISERVER_SERVER_CERT_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_SERVER_CERT") + ETCD_APISERVER_CLIENT_KEY_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_CLIENT_KEY") + ETCD_APISERVER_CLIENT_CERT_BASE64=$(get-env-val "${master_env}" "ETCD_APISERVER_CLIENT_CERT") +} + +# Update or verify required gcloud components are installed +# at minimum required version. +# Assumed vars +# KUBE_PROMPT_FOR_UPDATE +function update-or-verify-gcloud() { + local sudo_prefix="" + if [ ! -w $(dirname `which gcloud`) ]; then + sudo_prefix="sudo" + fi + # update and install components as needed + if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then + ${sudo_prefix} gcloud ${gcloud_prompt:-} components install alpha + ${sudo_prefix} gcloud ${gcloud_prompt:-} components install beta + ${sudo_prefix} gcloud ${gcloud_prompt:-} components update + else + local version=$(gcloud version --format=json) + python -c' +import json,sys +from distutils import version + +minVersion = version.LooseVersion("1.3.0") +required = [ "alpha", "beta", "core" ] +data = json.loads(sys.argv[1]) +rel = data.get("Google Cloud SDK") +if "CL @" in rel: + print("Using dev version of gcloud: %s" %rel) + exit(0) +if rel != "HEAD" and version.LooseVersion(rel) < minVersion: + print("gcloud version out of date ( < %s )" % minVersion) + exit(1) +missing = [] +for c in required: + if not data.get(c): + missing += [c] +if missing: + for c in missing: + print ("missing required gcloud component \"{0}\"".format(c)) + print ("Try running `gcloud components install {0}`".format(c)) + exit(1) + ' """${version}""" + fi +} + +# Robustly try to create a static ip. 
+# $1: The name of the ip to create +# $2: The name of the region to create the ip in. +function create-static-ip() { + detect-project + local attempt=0 + local REGION="$2" + while true; do + if gcloud compute addresses create "$1" \ + --project "${PROJECT}" \ + --region "${REGION}" -q > /dev/null; then + # successful operation - wait until it's visible + start="$(date +%s)" + while true; do + now="$(date +%s)" + # Timeout set to 15 minutes + if [[ $((now - start)) -gt 900 ]]; then + echo "Timeout while waiting for master IP visibility" + exit 2 + fi + if gcloud compute addresses describe "$1" --project "${PROJECT}" --region "${REGION}" >/dev/null 2>&1; then + break + fi + echo "Master IP not visible yet. Waiting..." + sleep 5 + done + break + fi + + if gcloud compute addresses describe "$1" \ + --project "${PROJECT}" \ + --region "${REGION}" >/dev/null 2>&1; then + # it exists - postcondition satisfied + break + fi + + if (( attempt > 4 )); then + echo -e "${color_red}Failed to create static ip $1 ${color_norm}" >&2 + exit 2 + fi + attempt=$(($attempt+1)) + echo -e "${color_yellow}Attempt $attempt failed to create static ip $1. Retrying.${color_norm}" >&2 + sleep $(($attempt * 5)) + done +} + +# Robustly try to create a firewall rule. +# $1: The name of firewall rule. +# $2: IP ranges. +# $3: Target tags for this firewall rule. +function create-firewall-rule() { + detect-project + local attempt=0 + while true; do + if ! gcloud compute firewall-rules create "$1" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "$2" \ + --target-tags "$3" \ + --allow tcp,udp,icmp,esp,ah,sctp; then + if (( attempt > 4 )); then + echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}" >&2 + exit 2 + fi + echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}" >&2 + attempt=$(($attempt+1)) + sleep $(($attempt * 5)) + else + break + fi + done +} + +# Format the string argument for gcloud network. +function make-gcloud-network-argument() { + local network_project="$1" + local region="$2" + local network="$3" + local subnet="$4" + local address="$5" # optional + local enable_ip_alias="$6" # optional + local alias_size="$7" # optional + + local networkURL="projects/${network_project}/global/networks/${network}" + local subnetURL="projects/${network_project}/regions/${region}/subnetworks/${subnet:-}" + + local ret="" + + if [[ "${enable_ip_alias}" == 'true' ]]; then + ret="--network-interface" + ret="${ret} network=${networkURL}" + # If address is omitted, instance will not receive an external IP. + ret="${ret},address=${address:-}" + ret="${ret},subnet=${subnetURL}" + ret="${ret},aliases=pods-default:${alias_size}" + ret="${ret} --no-can-ip-forward" + else + if [[ -n ${subnet:-} ]]; then + ret="${ret} --subnet ${subnetURL}" + else + ret="${ret} --network ${networkURL}" + fi + + ret="${ret} --can-ip-forward" + if [[ -n ${address:-} ]]; then + ret="${ret} --address ${address}" + fi + fi + + echo "${ret}" +} + +# $1: version (required) +# $2: Prefix for the template name, i.e. NODE_INSTANCE_PREFIX or +# WINDOWS_NODE_INSTANCE_PREFIX. 
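# Worked example (hypothetical values): with a prefix of "kubernetes-minion" and
# version "v1.14.0-beta.1", the pipeline below yields
# "kubernetes-minion-template-v1-14-0-beta-1" -- dots and '+' are mapped to
# dashes, the result is clipped to 63 characters, and trailing dashes are
# stripped so the name passes GCE resource-name validation.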
+function get-template-name-from-version() { + local -r version=${1} + local -r template_prefix=${2} + # trim template name to pass gce name validation + echo "${template_prefix}-template-${version}" | cut -c 1-63 | sed 's/[\.\+]/-/g;s/-*$//g' +} + +# validates the NODE_LOCAL_SSDS_EXT variable +function validate-node-local-ssds-ext(){ + ssdopts="${1}" + + if [[ -z "${ssdopts[0]}" || -z "${ssdopts[1]}" || -z "${ssdopts[2]}" ]]; then + echo -e "${color_red}Local SSD: NODE_LOCAL_SSDS_EXT is malformed, found ${ssdopts[0]-_},${ssdopts[1]-_},${ssdopts[2]-_} ${color_norm}" >&2 + exit 2 + fi + if [[ "${ssdopts[1]}" != "scsi" && "${ssdopts[1]}" != "nvme" ]]; then + echo -e "${color_red}Local SSD: Interface must be scsi or nvme, found: ${ssdopts[1]} ${color_norm}" >&2 + exit 2 + fi + if [[ "${ssdopts[2]}" != "fs" && "${ssdopts[2]}" != "block" ]]; then + echo -e "${color_red}Local SSD: Filesystem type must be fs or block, found: ${ssdopts[2]} ${color_norm}" >&2 + exit 2 + fi + local_ssd_ext_count=$((local_ssd_ext_count+ssdopts[0])) + if [[ "${local_ssd_ext_count}" -gt "${GCE_MAX_LOCAL_SSD}" || "${local_ssd_ext_count}" -lt 1 ]]; then + echo -e "${color_red}Local SSD: Total number of local ssds must range from 1 to 8, found: ${local_ssd_ext_count} ${color_norm}" >&2 + exit 2 + fi +} + +# Robustly try to create an instance template. +# $1: The name of the instance template. +# $2: The scopes flag. +# $3: String of comma-separated metadata-from-file entries. +# $4: String of comma-separated metadata (key=value) entries. +# $5: the node OS ("linux" or "windows"). +function create-node-template() { + detect-project + detect-subnetworks + local template_name="$1" + local metadata_values="$4" + local os="$5" + + # First, ensure the template doesn't exist. + # TODO(zmerlynn): To make this really robust, we need to parse the output and + # add retries. Just relying on a non-zero exit code doesn't + # distinguish an ephemeral failed call from a "not-exists". + if gcloud compute instance-templates describe "${template_name}" --project "${PROJECT}" &>/dev/null; then + echo "Instance template ${1} already exists; deleting." >&2 + if ! gcloud compute instance-templates delete "${template_name}" --project "${PROJECT}" --quiet &>/dev/null; then + echo -e "${color_yellow}Failed to delete existing instance template${color_norm}" >&2 + exit 2 + fi + fi + + local gcloud="gcloud" + + local accelerator_args="" + # VMs with Accelerators cannot be live migrated. + # More details here - https://cloud.google.com/compute/docs/gpus/add-gpus#create-new-gpu-instance + if [[ ! -z "${NODE_ACCELERATORS}" ]]; then + accelerator_args="--maintenance-policy TERMINATE --restart-on-failure --accelerator ${NODE_ACCELERATORS}" + gcloud="gcloud beta" + fi + + local preemptible_minions="" + if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then + preemptible_minions="--preemptible --maintenance-policy TERMINATE" + fi + + local local_ssds="" + local_ssd_ext_count=0 + if [[ ! -z ${NODE_LOCAL_SSDS_EXT:-} ]]; then + IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}" + for ssdgroup in "${ssdgroups[@]}" + do + IFS="," read -r -a ssdopts <<< "${ssdgroup}" + validate-node-local-ssds-ext "${ssdopts}" + for i in $(seq ${ssdopts[0]}); do + local_ssds="$local_ssds--local-ssd=interface=${ssdopts[1]} " + done + done + fi + + if [[ ! 
-z ${NODE_LOCAL_SSDS+x} ]]; then + # The NODE_LOCAL_SSDS check below fixes issue #49171 + # Some versions of seq will count down from 1 if "seq 0" is specified + if [[ ${NODE_LOCAL_SSDS} -ge 1 ]]; then + for i in $(seq ${NODE_LOCAL_SSDS}); do + local_ssds="$local_ssds--local-ssd=interface=SCSI " + done + fi + fi + + + local network=$(make-gcloud-network-argument \ + "${NETWORK_PROJECT}" \ + "${REGION}" \ + "${NETWORK}" \ + "${SUBNETWORK:-}" \ + "" \ + "${ENABLE_IP_ALIASES:-}" \ + "${IP_ALIAS_SIZE:-}") + + local node_image_flags="" + if [[ "${os}" == 'linux' ]]; then + node_image_flags="--image-project ${NODE_IMAGE_PROJECT} --image ${NODE_IMAGE}" + elif [[ "${os}" == 'windows' ]]; then + node_image_flags="--image-project ${WINDOWS_NODE_IMAGE_PROJECT} --image-family ${WINDOWS_NODE_IMAGE_FAMILY}" + else + echo "Unknown OS ${os}" >&2 + exit 1 + fi + + local metadata_flag="${metadata_values:+--metadata ${metadata_values}}" + + local attempt=1 + while true; do + echo "Attempt ${attempt} to create ${1}" >&2 + if ! ${gcloud} compute instance-templates create \ + "${template_name}" \ + --project "${PROJECT}" \ + --machine-type "${NODE_SIZE}" \ + --boot-disk-type "${NODE_DISK_TYPE}" \ + --boot-disk-size "${NODE_DISK_SIZE}" \ + ${node_image_flags} \ + --service-account "${NODE_SERVICE_ACCOUNT}" \ + --tags "${NODE_TAG}" \ + ${accelerator_args} \ + ${local_ssds} \ + --region "${REGION}" \ + ${network} \ + ${preemptible_minions} \ + $2 \ + --metadata-from-file $3 \ + ${metadata_flag} >&2; then + if (( attempt > 5 )); then + echo -e "${color_red}Failed to create instance template ${template_name} ${color_norm}" >&2 + exit 2 + fi + echo -e "${color_yellow}Attempt ${attempt} failed to create instance template ${template_name}. Retrying.${color_norm}" >&2 + attempt=$(($attempt+1)) + sleep $(($attempt * 5)) + + # In case the previous attempt failed with something like a + # Backend Error and left the entry laying around, delete it + # before we try again. + gcloud compute instance-templates delete "${template_name}" --project "${PROJECT}" &>/dev/null || true + else + break + fi + done +} + +# Instantiate a kubernetes cluster +# +# Assumed vars +# KUBE_ROOT +# +function kube-up() { + kube::util::ensure-temp-dir + detect-project + + load-or-gen-kube-basicauth + load-or-gen-kube-bearertoken + + # Make sure we have the tar files staged on Google Storage + find-release-tars + upload-tars + + # ensure that environmental variables specifying number of migs to create + set_num_migs + + if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then + detect-master + parse-master-env + create-subnetworks + detect-subnetworks + # Windows nodes take longer to boot and setup so create them first. + create-windows-nodes + create-linux-nodes + elif [[ ${KUBE_REPLICATE_EXISTING_MASTER:-} == "true" ]]; then + if [[ "${MASTER_OS_DISTRIBUTION}" != "gci" && "${MASTER_OS_DISTRIBUTION}" != "ubuntu" ]]; then + echo "Master replication supported only for gci and ubuntu" + return 1 + fi + create-loadbalancer + # If replication of master fails, we need to ensure that the replica is removed from etcd clusters. + if ! replicate-master; then + remove-replica-from-etcd 2379 || true + remove-replica-from-etcd 4002 || true + fi + else + check-existing + create-network + create-subnetworks + detect-subnetworks + write-cluster-location + write-cluster-name + create-autoscaler-config + create-master + create-nodes-firewall + create-nodes-template + # Windows nodes take longer to boot and setup so create them first. 
+ create-windows-nodes + create-linux-nodes + check-cluster + fi +} + +function check-existing() { + local running_in_terminal=false + # May be false if tty is not allocated (for example with ssh -T). + if [[ -t 1 ]]; then + running_in_terminal=true + fi + + if [[ ${running_in_terminal} == "true" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then + if ! check-resources; then + local run_kube_down="n" + echo "${KUBE_RESOURCE_FOUND} found." >&2 + # Get user input only if running in terminal. + if [[ ${running_in_terminal} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then + read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down + fi + if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then + echo "... calling kube-down" >&2 + kube-down + fi + fi + fi +} + +function check-network-mode() { + local mode="$(gcloud compute networks list --filter="name=('${NETWORK}')" --project ${NETWORK_PROJECT} --format='value(x_gcloud_subnet_mode)' || true)" + # The deprecated field uses lower case. Convert to upper case for consistency. + echo "$(echo $mode | tr [a-z] [A-Z])" +} + +function create-network() { + if ! gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" &>/dev/null; then + # The network needs to be created synchronously or we have a race. The + # firewalls can be added concurrent with instance creation. + local network_mode="auto" + if [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then + network_mode="custom" + fi + echo "Creating new ${network_mode} network: ${NETWORK}" + gcloud compute networks create --project "${NETWORK_PROJECT}" "${NETWORK}" --subnet-mode="${network_mode}" + else + PREEXISTING_NETWORK=true + PREEXISTING_NETWORK_MODE="$(check-network-mode)" + echo "Found existing network ${NETWORK} in ${PREEXISTING_NETWORK_MODE} mode." + fi + + if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-master" &>/dev/null; then + gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-master" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "10.0.0.0/8" \ + --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" \ + --target-tags "${MASTER_TAG}"& + fi + + if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${CLUSTER_NAME}-default-internal-node" &>/dev/null; then + gcloud compute firewall-rules create "${CLUSTER_NAME}-default-internal-node" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "10.0.0.0/8" \ + --allow "tcp:1-65535,udp:1-65535,icmp" \ + --target-tags "${NODE_TAG}"& + fi + + if ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then + gcloud compute firewall-rules create "${NETWORK}-default-ssh" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "0.0.0.0/0" \ + --allow "tcp:22" & + fi + + # Open up TCP 3389 to allow RDP connections. + if [[ ${NUM_WINDOWS_NODES} -gt 0 ]]; then + if ! 
gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NETWORK}-default-rdp" &>/dev/null; then + gcloud compute firewall-rules create "${NETWORK}-default-rdp" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "0.0.0.0/0" \ + --allow "tcp:3389" & + fi + fi +} + +function expand-default-subnetwork() { + gcloud compute networks update "${NETWORK}" \ + --switch-to-custom-subnet-mode \ + --project "${NETWORK_PROJECT}" \ + --quiet || true + gcloud compute networks subnets expand-ip-range "${NETWORK}" \ + --region="${REGION}" \ + --project "${NETWORK_PROJECT}" \ + --prefix-length=19 \ + --quiet +} + +function create-subnetworks() { + case ${ENABLE_IP_ALIASES} in + true) echo "IP aliases are enabled. Creating subnetworks.";; + false) + echo "IP aliases are disabled." + if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then + if [[ "${PREEXISTING_NETWORK}" != "true" ]]; then + expand-default-subnetwork + else + echo "${color_yellow}Using pre-existing network ${NETWORK}, subnets won't be expanded to /19!${color_norm}" + fi + elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" && "${PREEXISTING_NETWORK}" != "true" ]]; then + gcloud compute networks subnets create "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region "${REGION}" --network "${NETWORK}" --range "${NODE_IP_RANGE}" + fi + return;; + *) echo "${color_red}Invalid argument to ENABLE_IP_ALIASES${color_norm}" + exit 1;; + esac + + # Look for the alias subnet, it must exist and have a secondary + # range configured. + local subnet=$(gcloud compute networks subnets describe \ + --project "${NETWORK_PROJECT}" \ + --region ${REGION} \ + ${IP_ALIAS_SUBNETWORK} 2>/dev/null) + if [[ -z ${subnet} ]]; then + echo "Creating subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}" + gcloud compute networks subnets create \ + ${IP_ALIAS_SUBNETWORK} \ + --description "Automatically generated subnet for ${INSTANCE_PREFIX} cluster. This will be removed on cluster teardown." \ + --project "${NETWORK_PROJECT}" \ + --network ${NETWORK} \ + --region ${REGION} \ + --range ${NODE_IP_RANGE} \ + --secondary-range "pods-default=${CLUSTER_IP_RANGE}" \ + --secondary-range "services-default=${SERVICE_CLUSTER_IP_RANGE}" + echo "Created subnetwork ${IP_ALIAS_SUBNETWORK}" + else + if ! 
echo ${subnet} | grep --quiet secondaryIpRanges; then + echo "${color_red}Subnet ${IP_ALIAS_SUBNETWORK} does not have a secondary range${color_norm}" + exit 1 + fi + fi +} + +# detect-subnetworks sets the SUBNETWORK var if not already set +# Assumed vars: +# NETWORK +# REGION +# NETWORK_PROJECT +# +# Optional vars: +# SUBNETWORK +# IP_ALIAS_SUBNETWORK +function detect-subnetworks() { + if [[ -n ${SUBNETWORK:-} ]]; then + echo "Using subnet ${SUBNETWORK}" + return 0 + fi + + if [[ -n ${IP_ALIAS_SUBNETWORK:-} ]]; then + SUBNETWORK=${IP_ALIAS_SUBNETWORK} + echo "Using IP Alias subnet ${SUBNETWORK}" + return 0 + fi + + SUBNETWORK=$(gcloud compute networks subnets list \ + --network=${NETWORK} \ + --regions=${REGION} \ + --project=${NETWORK_PROJECT} \ + --limit=1 \ + --format='value(name)' 2>/dev/null) + + if [[ -n ${SUBNETWORK:-} ]]; then + echo "Found subnet for region ${REGION} in network ${NETWORK}: ${SUBNETWORK}" + return 0 + fi + + echo "${color_red}Could not find subnetwork with region ${REGION}, network ${NETWORK}, and project ${NETWORK_PROJECT}" +} + +function delete-all-firewall-rules() { + if fws=$(gcloud compute firewall-rules list --project "${NETWORK_PROJECT}" --filter="network=${NETWORK}" --format="value(name)"); then + echo "Deleting firewall rules remaining in network ${NETWORK}: ${fws}" + delete-firewall-rules "$fws" + else + echo "Failed to list firewall rules from the network ${NETWORK}" + fi +} + +# Ignores firewall rule arguments that do not exist in NETWORK_PROJECT. +function delete-firewall-rules() { + for fw in $@; do + if [[ -n $(gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${fw}" --format='value(name)' 2>/dev/null || true) ]]; then + gcloud compute firewall-rules delete --project "${NETWORK_PROJECT}" --quiet "${fw}" & + fi + done + kube::util::wait-for-jobs || { + echo -e "${color_red}Failed to delete firewall rules.${color_norm}" >&2 + } +} + +function delete-network() { + if [[ -n $(gcloud compute networks --project "${NETWORK_PROJECT}" describe "${NETWORK}" --format='value(name)' 2>/dev/null || true) ]]; then + if ! gcloud compute networks delete --project "${NETWORK_PROJECT}" --quiet "${NETWORK}"; then + echo "Failed to delete network '${NETWORK}'. Listing firewall-rules:" + gcloud compute firewall-rules --project "${NETWORK_PROJECT}" list --filter="network=${NETWORK}" + return 1 + fi + fi +} + +function delete-subnetworks() { + # If running in custom mode network we need to delete subnets manually. + mode="$(check-network-mode)" + if [[ "${mode}" == "CUSTOM" ]]; then + if [[ "${ENABLE_BIG_CLUSTER_SUBNETS}" = "true" ]]; then + echo "Deleting default subnets..." + # This value should be kept in sync with number of regions. + local parallelism=9 + gcloud compute networks subnets list --network="${NETWORK}" --project "${NETWORK_PROJECT}" --format='value(region.basename())' | \ + xargs -i -P ${parallelism} gcloud --quiet compute networks subnets delete "${NETWORK}" --project "${NETWORK_PROJECT}" --region="{}" || true + elif [[ "${CREATE_CUSTOM_NETWORK:-}" == "true" ]]; then + echo "Deleting custom subnet..." + gcloud --quiet compute networks subnets delete "${SUBNETWORK}" --project "${NETWORK_PROJECT}" --region="${REGION}" || true + fi + return + fi + + # If we reached here, it means we're not using custom network. + # So the only thing we need to check is if IP-aliases was turned + # on and we created a subnet for it. If so, we should delete it. 
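# Put differently (illustrative summary): with IP aliases enabled, an empty
# KUBE_GCE_IP_ALIAS_SUBNETWORK means the subnet named by IP_ALIAS_SUBNETWORK was
# auto-created by create-subnetworks above and may be removed here, while a
# user-supplied value marks it as pre-existing and it is left untouched.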
+ if [[ ${ENABLE_IP_ALIASES:-} == "true" ]]; then + # Only delete the subnet if we created it (i.e it's not pre-existing). + if [[ -z "${KUBE_GCE_IP_ALIAS_SUBNETWORK:-}" ]]; then + echo "Removing auto-created subnet ${NETWORK}:${IP_ALIAS_SUBNETWORK}" + if [[ -n $(gcloud compute networks subnets describe \ + --project "${NETWORK_PROJECT}" \ + --region ${REGION} \ + ${IP_ALIAS_SUBNETWORK} 2>/dev/null) ]]; then + gcloud --quiet compute networks subnets delete \ + --project "${NETWORK_PROJECT}" \ + --region ${REGION} \ + ${IP_ALIAS_SUBNETWORK} + fi + fi + fi +} + +# Generates SSL certificates for etcd cluster peer to peer communication. Uses cfssl program. +# +# Assumed vars: +# KUBE_TEMP: temporary directory +# +# Args: +# $1: host name +# $2: CA certificate +# $3: CA key +# +# If CA cert/key is empty, the function will also generate certs for CA. +# +# Vars set: +# ETCD_CA_KEY_BASE64 +# ETCD_CA_CERT_BASE64 +# ETCD_PEER_KEY_BASE64 +# ETCD_PEER_CERT_BASE64 +# +function create-etcd-certs { + local host=${1} + local ca_cert=${2:-} + local ca_key=${3:-} + + GEN_ETCD_CA_CERT="${ca_cert}" GEN_ETCD_CA_KEY="${ca_key}" \ + generate-etcd-cert "${KUBE_TEMP}/cfssl" "${host}" "peer" "peer" + + pushd "${KUBE_TEMP}/cfssl" + ETCD_CA_KEY_BASE64=$(cat "ca-key.pem" | base64 | tr -d '\r\n') + ETCD_CA_CERT_BASE64=$(cat "ca.pem" | gzip | base64 | tr -d '\r\n') + ETCD_PEER_KEY_BASE64=$(cat "peer-key.pem" | base64 | tr -d '\r\n') + ETCD_PEER_CERT_BASE64=$(cat "peer.pem" | gzip | base64 | tr -d '\r\n') + popd +} + +# Generates SSL certificates for etcd-client and kube-apiserver communication. Uses cfssl program. +# +# Assumed vars: +# KUBE_TEMP: temporary directory +# +# Args: +# $1: host server name +# $2: host client name +# $3: CA certificate +# $4: CA key +# +# If CA cert/key is empty, the function will also generate certs for CA. +# +# Vars set: +# ETCD_APISERVER_CA_KEY_BASE64 +# ETCD_APISERVER_CA_CERT_BASE64 +# ETCD_APISERVER_SERVER_KEY_BASE64 +# ETCD_APISERVER_SERVER_CERT_BASE64 +# ETCD_APISERVER_CLIENT_KEY_BASE64 +# ETCD_APISERVER_CLIENT_CERT_BASE64 +# +function create-etcd-apiserver-certs { + local hostServer=${1} + local hostClient=${2} + local etcd_apiserver_ca_cert=${3:-} + local etcd_apiserver_ca_key=${4:-} + + GEN_ETCD_CA_CERT="${etcd_apiserver_ca_cert}" GEN_ETCD_CA_KEY="${etcd_apiserver_ca_key}" \ + generate-etcd-cert "${KUBE_TEMP}/cfssl" "${hostServer}" "server" "etcd-apiserver-server" + generate-etcd-cert "${KUBE_TEMP}/cfssl" "${hostClient}" "client" "etcd-apiserver-client" + + pushd "${KUBE_TEMP}/cfssl" + ETCD_APISERVER_CA_KEY_BASE64=$(cat "ca-key.pem" | base64 | tr -d '\r\n') + ETCD_APISERVER_CA_CERT_BASE64=$(cat "ca.pem" | gzip | base64 | tr -d '\r\n') + ETCD_APISERVER_SERVER_KEY_BASE64=$(cat "etcd-apiserver-server-key.pem" | base64 | tr -d '\r\n') + ETCD_APISERVER_SERVER_CERT_BASE64=$(cat "etcd-apiserver-server.pem" | gzip | base64 | tr -d '\r\n') + ETCD_APISERVER_CLIENT_KEY_BASE64=$(cat "etcd-apiserver-client-key.pem" | base64 | tr -d '\r\n') + ETCD_APISERVER_CLIENT_CERT_BASE64=$(cat "etcd-apiserver-client.pem" | gzip | base64 | tr -d '\r\n') + popd +} + + +function create-master() { + echo "Starting master and configuring firewalls" + gcloud compute firewall-rules create "${MASTER_NAME}-https" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --target-tags "${MASTER_TAG}" \ + --allow tcp:443 & + + # We have to make sure the disk is created before creating the master VM, so + # run this in the foreground. 
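# (Aside: the "${MASTER_NAME}-https" firewall rule above is created in the
# background with '&', but the disk below is created in the foreground because
# the master VM created shortly afterwards attaches "${MASTER_NAME}-pd" and the
# disk must already exist at that point.)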
+ gcloud compute disks create "${MASTER_NAME}-pd" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --type "${MASTER_DISK_TYPE}" \ + --size "${MASTER_DISK_SIZE}" + + # Create rule for accessing and securing etcd servers. + if ! gcloud compute firewall-rules --project "${NETWORK_PROJECT}" describe "${MASTER_NAME}-etcd" &>/dev/null; then + gcloud compute firewall-rules create "${MASTER_NAME}-etcd" \ + --project "${NETWORK_PROJECT}" \ + --network "${NETWORK}" \ + --source-tags "${MASTER_TAG}" \ + --allow "tcp:2380,tcp:2381" \ + --target-tags "${MASTER_TAG}" & + fi + + # Generate a bearer token for this cluster. We push this separately + # from the other cluster variables so that the client (this + # computer) can forget it later. This should disappear with + # http://issue.k8s.io/3168 + KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then + NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + fi + + # Reserve the master's IP so that it can later be transferred to another VM + # without disrupting the kubelets. + create-static-ip "${MASTER_NAME}-ip" "${REGION}" + MASTER_RESERVED_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') + + if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then + KUBELET_APISERVER="${MASTER_RESERVED_IP}" + fi + + KUBERNETES_MASTER_NAME="${MASTER_RESERVED_IP}" + MASTER_ADVERTISE_ADDRESS="${MASTER_RESERVED_IP}" + + create-certs "${MASTER_RESERVED_IP}" + create-etcd-certs ${MASTER_NAME} + create-etcd-apiserver-certs "etcd-${MASTER_NAME}" ${MASTER_NAME} + + if [[ "$(get-num-nodes)" -ge "50" ]]; then + # We block on master creation for large clusters to avoid doing too much + # unnecessary work in case master start-up fails (like creation of nodes). + create-master-instance "${MASTER_RESERVED_IP}" + else + create-master-instance "${MASTER_RESERVED_IP}" & + fi +} + +# Adds master replica to etcd cluster. +# +# Assumed vars: +# REPLICA_NAME +# PROJECT +# EXISTING_MASTER_NAME +# EXISTING_MASTER_ZONE +# +# $1: etcd client port +# $2: etcd internal port +# returns the result of ssh command which adds replica +function add-replica-to-etcd() { + local -r client_port="${1}" + local -r internal_port="${2}" + gcloud compute ssh "${EXISTING_MASTER_NAME}" \ + --project "${PROJECT}" \ + --zone "${EXISTING_MASTER_ZONE}" \ + --command \ + "curl localhost:${client_port}/v2/members -XPOST -H \"Content-Type: application/json\" -d '{\"peerURLs\":[\"https://${REPLICA_NAME}:${internal_port}\"]}' -s" + return $? +} + +# Sets EXISTING_MASTER_NAME and EXISTING_MASTER_ZONE variables. +# +# Assumed vars: +# PROJECT +# +# NOTE: Must be in sync with get-replica-name-regexp +function set-existing-master() { + local existing_master=$(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter "name ~ '$(get-replica-name-regexp)'" \ + --format "value(name,zone)" | head -n1) + EXISTING_MASTER_NAME="$(echo "${existing_master}" | cut -f1)" + EXISTING_MASTER_ZONE="$(echo "${existing_master}" | cut -f2)" +} + +function replicate-master() { + set-replica-name + set-existing-master + + echo "Experimental: replicating existing master ${EXISTING_MASTER_ZONE}/${EXISTING_MASTER_NAME} as ${ZONE}/${REPLICA_NAME}" + + # Before we do anything else, we should configure etcd to expect more replicas. + if ! 
add-replica-to-etcd 2379 2380; then + echo "Failed to add master replica to etcd cluster." + return 1 + fi + if ! add-replica-to-etcd 4002 2381; then + echo "Failed to add master replica to etcd events cluster." + return 1 + fi + + # We have to make sure the disk is created before creating the master VM, so + # run this in the foreground. + gcloud compute disks create "${REPLICA_NAME}-pd" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --type "${MASTER_DISK_TYPE}" \ + --size "${MASTER_DISK_SIZE}" + + local existing_master_replicas="$(get-all-replica-names)" + replicate-master-instance "${EXISTING_MASTER_ZONE}" "${EXISTING_MASTER_NAME}" "${existing_master_replicas}" + + # Add new replica to the load balancer. + gcloud compute target-pools add-instances "${MASTER_NAME}" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --instances "${REPLICA_NAME}" +} + +# Detaches old and ataches new external IP to a VM. +# +# Arguments: +# $1 - VM name +# $2 - VM zone +# $3 - external static IP; if empty will use an ephemeral IP address. +function attach-external-ip() { + local NAME=${1} + local ZONE=${2} + local IP_ADDR=${3:-} + local ACCESS_CONFIG_NAME=$(gcloud compute instances describe "${NAME}" \ + --project "${PROJECT}" --zone "${ZONE}" \ + --format="value(networkInterfaces[0].accessConfigs[0].name)") + gcloud compute instances delete-access-config "${NAME}" \ + --project "${PROJECT}" --zone "${ZONE}" \ + --access-config-name "${ACCESS_CONFIG_NAME}" + if [[ -z ${IP_ADDR} ]]; then + gcloud compute instances add-access-config "${NAME}" \ + --project "${PROJECT}" --zone "${ZONE}" \ + --access-config-name "${ACCESS_CONFIG_NAME}" + else + gcloud compute instances add-access-config "${NAME}" \ + --project "${PROJECT}" --zone "${ZONE}" \ + --access-config-name "${ACCESS_CONFIG_NAME}" \ + --address "${IP_ADDR}" + fi +} + +# Creates load balancer in front of apiserver if it doesn't exists already. Assumes there's only one +# existing master replica. +# +# Assumes: +# PROJECT +# MASTER_NAME +# ZONE +# REGION +function create-loadbalancer() { + detect-master + + # Step 0: Return early if LB is already configured. + if gcloud compute forwarding-rules describe ${MASTER_NAME} \ + --project "${PROJECT}" --region ${REGION} > /dev/null 2>&1; then + echo "Load balancer already exists" + return + fi + + local EXISTING_MASTER_NAME="$(get-all-replica-names)" + local EXISTING_MASTER_ZONE=$(gcloud compute instances list "${EXISTING_MASTER_NAME}" \ + --project "${PROJECT}" --format="value(zone)") + + echo "Creating load balancer in front of an already existing master in ${EXISTING_MASTER_ZONE}" + + # Step 1: Detach master IP address and attach ephemeral address to the existing master + attach-external-ip "${EXISTING_MASTER_NAME}" "${EXISTING_MASTER_ZONE}" + + # Step 2: Create target pool. + gcloud compute target-pools create "${MASTER_NAME}" --project "${PROJECT}" --region "${REGION}" + # TODO: We should also add master instances with suffixes + gcloud compute target-pools add-instances "${MASTER_NAME}" --instances "${EXISTING_MASTER_NAME}" --project "${PROJECT}" --zone "${EXISTING_MASTER_ZONE}" + + # Step 3: Create forwarding rule. + # TODO: This step can take up to 20 min. We need to speed this up... + gcloud compute forwarding-rules create ${MASTER_NAME} \ + --project "${PROJECT}" --region ${REGION} \ + --target-pool ${MASTER_NAME} --address=${KUBE_MASTER_IP} --ports=443 + + echo -n "Waiting for the load balancer configuration to propagate..." 
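# (Aside on the loop below: each probe is capped at one second by curl's '-m1'
# and there is no explicit sleep, so the 1800-iteration limit corresponds
# roughly to the 30-minute wait the timeout message reports -- less if the
# connection is refused immediately.)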
+ local counter=0 + until $(curl -k -m1 https://${KUBE_MASTER_IP} &> /dev/null); do + counter=$((counter+1)) + echo -n . + if [[ ${counter} -ge 1800 ]]; then + echo -e "${color_red}TIMEOUT${color_norm}" >&2 + echo -e "${color_red}Load balancer failed to initialize within ${counter} seconds.${color_norm}" >&2 + exit 2 + fi + done + echo "DONE" +} + +function create-nodes-firewall() { + # Create a single firewall rule for all minions. + create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" & + + # Report logging choice (if any). + if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then + echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}" + fi + + # Wait for last batch of jobs + kube::util::wait-for-jobs || { + echo -e "${color_red}Some commands failed.${color_norm}" >&2 + } +} + +function get-scope-flags() { + local scope_flags= + if [[ -n "${NODE_SCOPES}" ]]; then + scope_flags="--scopes ${NODE_SCOPES}" + else + scope_flags="--no-scopes" + fi + echo "${scope_flags}" +} + +function create-nodes-template() { + echo "Creating nodes." + + local scope_flags=$(get-scope-flags) + + write-linux-node-env + write-windows-node-env + + # NOTE: these template names and their format must match + # create-[linux,windows]-nodes() as well as get-template()! + local linux_template_name="${NODE_INSTANCE_PREFIX}-template" + local windows_template_name="${WINDOWS_NODE_INSTANCE_PREFIX}-template" + create-linux-node-instance-template $linux_template_name + create-windows-node-instance-template $windows_template_name "${scope_flags[*]}" +} + +# Assumes: +# - MAX_INSTANCES_PER_MIG +# - NUM_NODES +# - NUM_WINDOWS_NODES +# exports: +# - NUM_MIGS +# - NUM_WINDOWS_MIGS +function set_num_migs() { + local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-1000} + + if [[ ${defaulted_max_instances_per_mig} -le "0" ]]; then + echo "MAX_INSTANCES_PER_MIG cannot be negative. Assuming default 1000" + defaulted_max_instances_per_mig=1000 + fi + export NUM_MIGS=$(((${NUM_NODES} + ${defaulted_max_instances_per_mig} - 1) / ${defaulted_max_instances_per_mig})) + export NUM_WINDOWS_MIGS=$(((${NUM_WINDOWS_NODES} + ${defaulted_max_instances_per_mig} - 1) / ${defaulted_max_instances_per_mig})) +} + +# Assumes: +# - NUM_MIGS +# - NODE_INSTANCE_PREFIX +# - NUM_NODES +# - PROJECT +# - ZONE +function create-linux-nodes() { + local template_name="${NODE_INSTANCE_PREFIX}-template" + + if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + local -r nodes="${NUM_NODES}" + else + echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}" + create-heapster-node + local -r nodes=$(( NUM_NODES - 1 )) + fi + + local instances_left=${nodes} + + for ((i=1; i<=${NUM_MIGS}; i++)); do + local group_name="${NODE_INSTANCE_PREFIX}-group-$i" + if [[ $i == ${NUM_MIGS} ]]; then + # TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG. + # We should change it at some point, but note #18545 when changing this. 
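# (Aside on the sizing arithmetic a few lines below, with hypothetical numbers:
# for 10 remaining instances spread over 3 MIGs, iteration i receives
# instances_left / (NUM_MIGS - i + 1) rounded down, giving group sizes 3, 3 and
# 4; the integer-division remainder accumulates in the last group.)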
+ group_name="${NODE_INSTANCE_PREFIX}-group" + fi + # Spread the remaining number of nodes evenly + this_mig_size=$((${instances_left} / (${NUM_MIGS}-${i}+1))) + instances_left=$((instances_left-${this_mig_size})) + + gcloud compute instance-groups managed \ + create "${group_name}" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --base-instance-name "${group_name}" \ + --size "${this_mig_size}" \ + --template "${template_name}" || true; + gcloud compute instance-groups managed wait-until-stable \ + "${group_name}" \ + --zone "${ZONE}" \ + --project "${PROJECT}" \ + --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true & + done + wait +} + +# Assumes: +# - NUM_WINDOWS_MIGS +# - WINDOWS_NODE_INSTANCE_PREFIX +# - NUM_WINDOWS_NODES +# - PROJECT +# - ZONE +function create-windows-nodes() { + local template_name="${WINDOWS_NODE_INSTANCE_PREFIX}-template" + + local -r nodes="${NUM_WINDOWS_NODES}" + local instances_left=${nodes} + + for ((i=1; i<=${NUM_WINDOWS_MIGS}; i++)); do + local group_name="${WINDOWS_NODE_INSTANCE_PREFIX}-group-$i" + if [[ $i == ${NUM_WINDOWS_MIGS} ]]; then + # TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG. + # We should change it at some point, but note #18545 when changing this. + group_name="${WINDOWS_NODE_INSTANCE_PREFIX}-group" + fi + # Spread the remaining number of nodes evenly + this_mig_size=$((${instances_left} / (${NUM_WINDOWS_MIGS}-${i}+1))) + instances_left=$((instances_left-${this_mig_size})) + + gcloud compute instance-groups managed \ + create "${group_name}" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --base-instance-name "${group_name}" \ + --size "${this_mig_size}" \ + --template "${template_name}" || true; + gcloud compute instance-groups managed wait-until-stable \ + "${group_name}" \ + --zone "${ZONE}" \ + --project "${PROJECT}" \ + --timeout "${MIG_WAIT_UNTIL_STABLE_TIMEOUT}" || true; + done +} + +# Assumes: +# - NODE_INSTANCE_PREFIX +# - PROJECT +# - NETWORK_PROJECT +# - REGION +# - ZONE +# - HEAPSTER_MACHINE_TYPE +# - NODE_DISK_TYPE +# - NODE_DISK_SIZE +# - NODE_IMAGE_PROJECT +# - NODE_IMAGE +# - NODE_SERVICE_ACCOUNT +# - NODE_TAG +# - NETWORK +# - ENABLE_IP_ALIASES +# - SUBNETWORK +# - IP_ALIAS_SIZE +function create-heapster-node() { + local gcloud="gcloud" + + local network=$(make-gcloud-network-argument \ + "${NETWORK_PROJECT}" \ + "${REGION}" \ + "${NETWORK}" \ + "${SUBNETWORK:-}" \ + "" \ + "${ENABLE_IP_ALIASES:-}" \ + "${IP_ALIAS_SIZE:-}") + + ${gcloud} compute instances \ + create "${NODE_INSTANCE_PREFIX}-heapster" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type="${HEAPSTER_MACHINE_TYPE}" \ + --boot-disk-type "${NODE_DISK_TYPE}" \ + --boot-disk-size "${NODE_DISK_SIZE}" \ + --image-project="${NODE_IMAGE_PROJECT}" \ + --image "${NODE_IMAGE}" \ + --service-account "${NODE_SERVICE_ACCOUNT}" \ + --tags "${NODE_TAG}" \ + ${network} \ + $(get-scope-flags) \ + --metadata-from-file "$(get-node-instance-metadata-from-file)" +} + +# Assumes: +# - NUM_MIGS +# - NODE_INSTANCE_PREFIX +# - PROJECT +# - ZONE +# - AUTOSCALER_MAX_NODES +# - AUTOSCALER_MIN_NODES +# Exports +# - AUTOSCALER_MIG_CONFIG +function create-cluster-autoscaler-mig-config() { + + # Each MIG must have at least one node, so the min number of nodes + # must be greater or equal to the number of migs. 
+ if [[ ${AUTOSCALER_MIN_NODES} -lt 0 ]]; then + echo "AUTOSCALER_MIN_NODES must be greater or equal 0" + exit 2 + fi + + # Each MIG must have at least one node, so the min number of nodes + # must be greater or equal to the number of migs. + if [[ ${AUTOSCALER_MAX_NODES} -lt ${NUM_MIGS} ]]; then + echo "AUTOSCALER_MAX_NODES must be greater or equal ${NUM_MIGS}" + exit 2 + fi + if [[ ${NUM_WINDOWS_MIGS} -gt 0 ]]; then + # TODO(pjh): implement Windows support in this function. + echo "Not implemented yet: autoscaler config for Windows MIGs" + exit 2 + fi + + # The code assumes that the migs were created with create-nodes + # function which tries to evenly spread nodes across the migs. + AUTOSCALER_MIG_CONFIG="" + + local left_min=${AUTOSCALER_MIN_NODES} + local left_max=${AUTOSCALER_MAX_NODES} + + for ((i=1; i<=${NUM_MIGS}; i++)); do + local group_name="${NODE_INSTANCE_PREFIX}-group-$i" + if [[ $i == ${NUM_MIGS} ]]; then + # TODO: We don't add a suffix for the last group to keep backward compatibility when there's only one MIG. + # We should change it at some point, but note #18545 when changing this. + group_name="${NODE_INSTANCE_PREFIX}-group" + fi + + this_mig_min=$((${left_min}/(${NUM_MIGS}-${i}+1))) + this_mig_max=$((${left_max}/(${NUM_MIGS}-${i}+1))) + left_min=$((left_min-$this_mig_min)) + left_max=$((left_max-$this_mig_max)) + + local mig_url="https://www.googleapis.com/compute/v1/projects/${PROJECT}/zones/${ZONE}/instanceGroups/${group_name}" + AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --nodes=${this_mig_min}:${this_mig_max}:${mig_url}" + done + + AUTOSCALER_MIG_CONFIG="${AUTOSCALER_MIG_CONFIG} --scale-down-enabled=${AUTOSCALER_ENABLE_SCALE_DOWN}" +} + +# Assumes: +# - NUM_MIGS +# - NODE_INSTANCE_PREFIX +# - PROJECT +# - ZONE +# - ENABLE_CLUSTER_AUTOSCALER +# - AUTOSCALER_MAX_NODES +# - AUTOSCALER_MIN_NODES +function create-autoscaler-config() { + # Create autoscaler for nodes configuration if requested + if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then + create-cluster-autoscaler-mig-config + echo "Using autoscaler config: ${AUTOSCALER_MIG_CONFIG} ${AUTOSCALER_EXPANDER_CONFIG}" + fi +} + +function check-cluster() { + detect-node-names + detect-master + + echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization." + echo + echo " This will continually check to see if the API for kubernetes is reachable." + echo " This may time out if there was some uncaught error during start up." + echo + + # curl in mavericks is borked. + secure="" + if which sw_vers >& /dev/null; then + if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then + secure="--insecure" + fi + fi + + local start_time=$(date +%s) + local curl_out=$(mktemp) + kube::util::trap_add "rm -f ${curl_out}" EXIT + until curl --cacert "${CERT_DIR}/pki/ca.crt" \ + -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \ + ${secure} \ + --max-time 5 --fail \ + "https://${KUBE_MASTER_IP}/api/v1/pods?limit=100" > "${curl_out}" 2>&1; do + local elapsed=$(($(date +%s) - ${start_time})) + if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then + echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2 + echo "Last output from querying API server follows:" >&2 + echo "-----------------------------------------------------" >&2 + cat "${curl_out}" >&2 + echo "-----------------------------------------------------" >&2 + exit 2 + fi + printf "." + sleep 2 + done + + echo "Kubernetes cluster created." 
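# Usage sketch (illustrative, not part of this patch): once create-kubeconfig
# below has written the credentials, the new cluster can be inspected via the
# generated context, e.g.
#   kubectl --context="<project>_<instance-prefix>" get nodes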
+ + export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt" + export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key" + export CA_CERT="${CERT_DIR}/pki/ca.crt" + export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}" + ( + umask 077 + + # Update the user's kubeconfig to include credentials for this apiserver. + create-kubeconfig + ) + + # ensures KUBECONFIG is set + get-kubeconfig-basicauth + echo + echo -e "${color_green}Kubernetes cluster is running. The master is running at:" + echo + echo -e "${color_yellow} https://${KUBE_MASTER_IP}" + echo + echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}" + echo + +} + +# Removes master replica from etcd cluster. +# +# Assumed vars: +# REPLICA_NAME +# PROJECT +# EXISTING_MASTER_NAME +# EXISTING_MASTER_ZONE +# +# $1: etcd client port +# returns the result of ssh command which removes replica +function remove-replica-from-etcd() { + local -r port="${1}" + [[ -n "${EXISTING_MASTER_NAME}" ]] || return + gcloud compute ssh "${EXISTING_MASTER_NAME}" \ + --project "${PROJECT}" \ + --zone "${EXISTING_MASTER_ZONE}" \ + --command \ + "curl -s localhost:${port}/v2/members/\$(curl -s localhost:${port}/v2/members -XGET | sed 's/{\\\"id/\n/g' | grep ${REPLICA_NAME}\\\" | cut -f 3 -d \\\") -XDELETE -L 2>/dev/null" + local -r res=$? + echo "Removing etcd replica, name: ${REPLICA_NAME}, port: ${port}, result: ${res}" + return "${res}" +} + +# Delete a kubernetes cluster. This is called from test-teardown. +# +# Assumed vars: +# MASTER_NAME +# NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX +# ZONE +# This function tears down cluster resources 10 at a time to avoid issuing too many +# API calls and exceeding API quota. It is important to bring down the instances before bringing +# down the firewall rules and routes. +function kube-down() { + local -r batch=200 + + detect-project + detect-node-names # For INSTANCE_GROUPS and WINDOWS_INSTANCE_GROUPS + + echo "Bringing down cluster" + set +e # Do not stop on error + + if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then + # Get the name of the managed instance group template before we delete the + # managed instance group. (The name of the managed instance group template may + # change during a cluster upgrade.) + local templates=$(get-template "${PROJECT}") + + local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]}) + for group in ${all_instance_groups[@]:-}; do + if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then + gcloud compute instance-groups managed delete \ + --project "${PROJECT}" \ + --quiet \ + --zone "${ZONE}" \ + "${group}" & + fi + done + + # Wait for last batch of jobs + kube::util::wait-for-jobs || { + echo -e "Failed to delete instance group(s)." >&2 + } + + for template in ${templates[@]:-}; do + if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then + gcloud compute instance-templates delete \ + --project "${PROJECT}" \ + --quiet \ + "${template}" + fi + done + + # Delete the special heapster node (if it exists). + if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then + local -r heapster_machine_name="${NODE_INSTANCE_PREFIX}-heapster" + if gcloud compute instances describe "${heapster_machine_name}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then + # Now we can safely delete the VM. 
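# (Aside: '--delete-disks all' below removes the instance's attached persistent
# disks together with the VM, so the special heapster node leaves no disk
# behind.)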
+ gcloud compute instances delete \ + --project "${PROJECT}" \ + --quiet \ + --delete-disks all \ + --zone "${ZONE}" \ + "${heapster_machine_name}" + fi + fi + fi + + local -r REPLICA_NAME="${KUBE_REPLICA_NAME:-$(get-replica-name)}" + + set-existing-master + + # Un-register the master replica from etcd and events etcd. + remove-replica-from-etcd 2379 + remove-replica-from-etcd 4002 + + # Delete the master replica (if it exists). + if gcloud compute instances describe "${REPLICA_NAME}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then + # If there is a load balancer in front of apiservers we need to first update its configuration. + if gcloud compute target-pools describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then + gcloud compute target-pools remove-instances "${MASTER_NAME}" \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --instances "${REPLICA_NAME}" + fi + # Now we can safely delete the VM. + gcloud compute instances delete \ + --project "${PROJECT}" \ + --quiet \ + --delete-disks all \ + --zone "${ZONE}" \ + "${REPLICA_NAME}" + fi + + # Delete the master replica pd (possibly leaked by kube-up if master create failed). + # TODO(jszczepkowski): remove also possibly leaked replicas' pds + local -r replica_pd="${REPLICA_NAME:-${MASTER_NAME}}-pd" + if gcloud compute disks describe "${replica_pd}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then + gcloud compute disks delete \ + --project "${PROJECT}" \ + --quiet \ + --zone "${ZONE}" \ + "${replica_pd}" + fi + + # Check if this are any remaining master replicas. + local REMAINING_MASTER_COUNT=$(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="name ~ '$(get-replica-name-regexp)'" \ + --format "value(zone)" | wc -l) + + # In the replicated scenario, if there's only a single master left, we should also delete load balancer in front of it. + if [[ "${REMAINING_MASTER_COUNT}" -eq 1 ]]; then + if gcloud compute forwarding-rules describe "${MASTER_NAME}" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then + detect-master + local REMAINING_REPLICA_NAME="$(get-all-replica-names)" + local REMAINING_REPLICA_ZONE=$(gcloud compute instances list "${REMAINING_REPLICA_NAME}" \ + --project "${PROJECT}" --format="value(zone)") + gcloud compute forwarding-rules delete \ + --project "${PROJECT}" \ + --region "${REGION}" \ + --quiet \ + "${MASTER_NAME}" + attach-external-ip "${REMAINING_REPLICA_NAME}" "${REMAINING_REPLICA_ZONE}" "${KUBE_MASTER_IP}" + gcloud compute target-pools delete \ + --project "${PROJECT}" \ + --region "${REGION}" \ + --quiet \ + "${MASTER_NAME}" + fi + fi + + # If there are no more remaining master replicas, we should delete all remaining network resources. + if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then + # Delete firewall rule for the master, etcd servers, and nodes. + delete-firewall-rules "${MASTER_NAME}-https" "${MASTER_NAME}-etcd" "${NODE_TAG}-all" + # Delete the master's reserved IP + if gcloud compute addresses describe "${MASTER_NAME}-ip" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then + gcloud compute addresses delete \ + --project "${PROJECT}" \ + --region "${REGION}" \ + --quiet \ + "${MASTER_NAME}-ip" + fi + fi + + if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then + # Find out what minions are running. 
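# (Aside on the batching below: "${minions[@]::${batch}}" expands to the first
# ${batch} instance names and "${minions[@]:${batch}}" to the remainder, so each
# gcloud call deletes at most ${batch} VMs and the array shrinks until empty.)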
+ local -a minions + minions=( $(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="(name ~ '${NODE_INSTANCE_PREFIX}-.+' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+') AND zone:(${ZONE})" \ + --format='value(name)') ) + # If any minions are running, delete them in batches. + while (( "${#minions[@]}" > 0 )); do + echo Deleting nodes "${minions[*]::${batch}}" + gcloud compute instances delete \ + --project "${PROJECT}" \ + --quiet \ + --delete-disks boot \ + --zone "${ZONE}" \ + "${minions[@]::${batch}}" + minions=( "${minions[@]:${batch}}" ) + done + fi + + # If there are no more remaining master replicas: delete routes, pd for influxdb and update kubeconfig + if [[ "${REMAINING_MASTER_COUNT}" -eq 0 ]]; then + # Delete routes. + local -a routes + # Clean up all routes w/ names like "-" + # e.g. "kubernetes-12345678-90ab-cdef-1234-567890abcdef". The name is + # determined by the node controller on the master. + # Note that this is currently a noop, as synchronously deleting the node MIG + # first allows the master to cleanup routes itself. + local TRUNCATED_PREFIX="${INSTANCE_PREFIX:0:26}" + routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \ + --filter="name ~ '${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}'" \ + --format='value(name)') ) + while (( "${#routes[@]}" > 0 )); do + echo Deleting routes "${routes[*]::${batch}}" + gcloud compute routes delete \ + --project "${NETWORK_PROJECT}" \ + --quiet \ + "${routes[@]::${batch}}" + routes=( "${routes[@]:${batch}}" ) + done + + # Delete persistent disk for influx-db. + if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then + gcloud compute disks delete \ + --project "${PROJECT}" \ + --quiet \ + --zone "${ZONE}" \ + "${INSTANCE_PREFIX}"-influxdb-pd + fi + + # Delete all remaining firewall rules and network. + delete-firewall-rules \ + "${CLUSTER_NAME}-default-internal-master" \ + "${CLUSTER_NAME}-default-internal-node" \ + "${NETWORK}-default-ssh" \ + "${NETWORK}-default-rdp" \ + "${NETWORK}-default-internal" # Pre-1.5 clusters + + if [[ "${KUBE_DELETE_NETWORK}" == "true" ]]; then + # Delete all remaining firewall rules in the network. + delete-all-firewall-rules || true + delete-subnetworks || true + delete-network || true # might fail if there are leaked resources that reference the network + fi + + # If there are no more remaining master replicas, we should update kubeconfig. + export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}" + clear-kubeconfig + else + # If some master replicas remain: cluster has been changed, we need to re-validate it. + echo "... calling validate-cluster" >&2 + # Override errexit + (validate-cluster) && validate_result="$?" || validate_result="$?" + + # We have two different failure modes from validate cluster: + # - 1: fatal error - cluster won't be working correctly + # - 2: weak error - something went wrong, but cluster probably will be working correctly + # We just print an error message in case 2). + if [[ "${validate_result}" -eq 1 ]]; then + exit 1 + elif [[ "${validate_result}" -eq 2 ]]; then + echo "...ignoring non-fatal errors in validate-cluster" >&2 + fi + fi + set -e +} + +# Prints name of one of the master replicas in the current zone. It will be either +# just MASTER_NAME or MASTER_NAME with a suffix for a replica (see get-replica-name-regexp). +# +# Assumed vars: +# PROJECT +# ZONE +# MASTER_NAME +# +# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name. 
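# For example (illustrative): with a MASTER_NAME of "kubernetes-master", the
# pattern from get-replica-name-regexp, "^kubernetes-master(-...)?", matches both
# the original "kubernetes-master" VM and replicas such as "kubernetes-master-a1b",
# whose three-character suffix is generated by set-replica-name.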
+function get-replica-name() { + echo $(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="name ~ '$(get-replica-name-regexp)' AND zone:(${ZONE})" \ + --format "value(name)" | head -n1) +} + +# Prints comma-separated names of all of the master replicas in all zones. +# +# Assumed vars: +# PROJECT +# MASTER_NAME +# +# NOTE: Must be in sync with get-replica-name-regexp and set-replica-name. +function get-all-replica-names() { + echo $(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="name ~ '$(get-replica-name-regexp)'" \ + --format "value(name)" | tr "\n" "," | sed 's/,$//') +} + +# Prints the number of all of the master replicas in all zones. +# +# Assumed vars: +# MASTER_NAME +function get-master-replicas-count() { + detect-project + local num_masters=$(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="name ~ '$(get-replica-name-regexp)'" \ + --format "value(zone)" | wc -l) + echo -n "${num_masters}" +} + +# Prints regexp for full master machine name. In a cluster with replicated master, +# VM names may either be MASTER_NAME or MASTER_NAME with a suffix for a replica. +function get-replica-name-regexp() { + echo "^${MASTER_NAME}(-...)?" +} + +# Sets REPLICA_NAME to a unique name for a master replica that will match +# expected regexp (see get-replica-name-regexp). +# +# Assumed vars: +# PROJECT +# ZONE +# MASTER_NAME +# +# Sets: +# REPLICA_NAME +function set-replica-name() { + local instances=$(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="name ~ '$(get-replica-name-regexp)'" \ + --format "value(name)") + + suffix="" + while echo "${instances}" | grep "${suffix}" &>/dev/null; do + suffix="$(date | md5sum | head -c3)" + done + REPLICA_NAME="${MASTER_NAME}-${suffix}" +} + +# Gets the instance templates in use by the cluster. It echos the template names +# so that the function output can be used. +# Assumed vars: +# NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX +# +# $1: project +function get-template() { + local linux_filter="${NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?" + local windows_filter="${WINDOWS_NODE_INSTANCE_PREFIX}-template(-(${KUBE_RELEASE_VERSION_DASHED_REGEX}|${KUBE_CI_VERSION_DASHED_REGEX}))?" + + gcloud compute instance-templates list \ + --filter="name ~ '${linux_filter}' OR name ~ '${windows_filter}'" \ + --project="${1}" --format='value(name)' +} + +# Checks if there are any present resources related kubernetes cluster. 
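# Editorial sketch (not part of the vendored script): check-resources returns
# non-zero when a leftover resource is found and reports it via
# KUBE_RESOURCE_FOUND, so a caller would typically do something like:
#   if ! check-resources; then
#     echo "Found existing resources: ${KUBE_RESOURCE_FOUND}" >&2
#   fi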
+# +# Assumed vars: +# MASTER_NAME +# NODE_INSTANCE_PREFIX +# WINDOWS_NODE_INSTANCE_PREFIX +# ZONE +# REGION +# Vars set: +# KUBE_RESOURCE_FOUND +function check-resources() { + detect-project + detect-node-names + + echo "Looking for already existing resources" + KUBE_RESOURCE_FOUND="" + + if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then + KUBE_RESOURCE_FOUND="Managed instance groups ${INSTANCE_GROUPS[@]}" + return 1 + fi + if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then + KUBE_RESOURCE_FOUND="Managed instance groups ${WINDOWS_INSTANCE_GROUPS[@]}" + return 1 + fi + + if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then + KUBE_RESOURCE_FOUND="Instance template ${NODE_INSTANCE_PREFIX}-template" + return 1 + fi + if gcloud compute instance-templates describe --project "${PROJECT}" "${WINDOWS_NODE_INSTANCE_PREFIX}-template" &>/dev/null; then + KUBE_RESOURCE_FOUND="Instance template ${WINDOWS_NODE_INSTANCE_PREFIX}-template" + return 1 + fi + + if gcloud compute instances describe --project "${PROJECT}" "${MASTER_NAME}" --zone "${ZONE}" &>/dev/null; then + KUBE_RESOURCE_FOUND="Kubernetes master ${MASTER_NAME}" + return 1 + fi + + if gcloud compute disks describe --project "${PROJECT}" "${MASTER_NAME}"-pd --zone "${ZONE}" &>/dev/null; then + KUBE_RESOURCE_FOUND="Persistent disk ${MASTER_NAME}-pd" + return 1 + fi + + # Find out what minions are running. + local -a minions + minions=( $(gcloud compute instances list \ + --project "${PROJECT}" \ + --filter="(name ~ '${NODE_INSTANCE_PREFIX}-.+' OR name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+') AND zone:(${ZONE})" \ + --format='value(name)') ) + if (( "${#minions[@]}" > 0 )); then + KUBE_RESOURCE_FOUND="${#minions[@]} matching ${NODE_INSTANCE_PREFIX}-.+ or ${WINDOWS_NODE_INSTANCE_PREFIX}-.+" + return 1 + fi + + if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${MASTER_NAME}-https" &>/dev/null; then + KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-https" + return 1 + fi + + if gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-all" &>/dev/null; then + KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all" + return 1 + fi + + local -a routes + routes=( $(gcloud compute routes list --project "${NETWORK_PROJECT}" \ + --filter="name ~ '${INSTANCE_PREFIX}-minion-.{4}'" --format='value(name)') ) + if (( "${#routes[@]}" > 0 )); then + KUBE_RESOURCE_FOUND="${#routes[@]} routes matching ${INSTANCE_PREFIX}-minion-.{4}" + return 1 + fi + + if gcloud compute addresses describe --project "${PROJECT}" "${MASTER_NAME}-ip" --region "${REGION}" &>/dev/null; then + KUBE_RESOURCE_FOUND="Master's reserved IP" + return 1 + fi + + # No resources found. + return 0 +} + +# ----------------------------------------------------------------------------- +# Cluster specific test helpers used from hack/e2e.go + +# Execute prior to running tests to build a release if required for env. +# +# Assumed Vars: +# KUBE_ROOT +function test-build-release() { + # Make a release + "${KUBE_ROOT}/build/release.sh" +} + +# Execute prior to running tests to initialize required structure. This is +# called from hack/e2e.go only when running -up. 
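# Editorial usage sketch (zone names are placeholders, not from this patch):
# a multizone test cluster is brought up by exporting the variables consumed
# below before calling test-setup, e.g.
#   MULTIZONE=true E2E_ZONES="us-central1-a us-central1-b" test-setup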
+# +# Assumed vars: +# Variables from config.sh +function test-setup() { + # Detect the project into $PROJECT if it isn't set + detect-project + + if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then + for KUBE_GCE_ZONE in ${E2E_ZONES}; do + KUBE_GCE_ZONE="${KUBE_GCE_ZONE}" KUBE_USE_EXISTING_MASTER="${KUBE_USE_EXISTING_MASTER:-}" "${KUBE_ROOT}/cluster/kube-up.sh" + KUBE_USE_EXISTING_MASTER="true" # For subsequent zones we use the existing master + done + else + "${KUBE_ROOT}/cluster/kube-up.sh" + fi + + # Open up port 80 & 8080 so common containers on minions can be reached + # TODO(roberthbailey): Remove this once we are no longer relying on hostPorts. + local start=`date +%s` + gcloud compute firewall-rules create \ + --project "${NETWORK_PROJECT}" \ + --target-tags "${NODE_TAG}" \ + --allow tcp:80,tcp:8080 \ + --network "${NETWORK}" \ + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true + # As there is no simple way to wait longer for this operation we need to manually + # wait some additional time (20 minutes altogether). + while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null; do + if [[ $(($start + 1200)) -lt `date +%s` ]]; then + echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-http-alt in ${NETWORK_PROJECT}" >&2 + exit 1 + fi + sleep 5 + done + + # Open up the NodePort range + # TODO(justinsb): Move to main setup, if we decide whether we want to do this by default. + start=`date +%s` + gcloud compute firewall-rules create \ + --project "${NETWORK_PROJECT}" \ + --target-tags "${NODE_TAG}" \ + --allow tcp:30000-32767,udp:30000-32767 \ + --network "${NETWORK}" \ + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true + # As there is no simple way to wait longer for this operation we need to manually + # wait some additional time (20 minutes altogether). + while ! gcloud compute firewall-rules describe --project "${NETWORK_PROJECT}" "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null; do + if [[ $(($start + 1200)) -lt `date +%s` ]]; then + echo -e "${color_red}Failed to create firewall ${NODE_TAG}-${INSTANCE_PREFIX}-nodeports in ${PROJECT}" >&2 + exit 1 + fi + sleep 5 + done +} + +# Execute after running tests to perform any required clean-up. This is called +# from hack/e2e.go +function test-teardown() { + detect-project + echo "Shutting down test cluster in background." + delete-firewall-rules \ + "${NODE_TAG}-${INSTANCE_PREFIX}-http-alt" \ + "${NODE_TAG}-${INSTANCE_PREFIX}-nodeports" + if [[ ${MULTIZONE:-} == "true" && -n ${E2E_ZONES:-} ]]; then + local zones=( ${E2E_ZONES} ) + # tear them down in reverse order, finally tearing down the master too. + for ((zone_num=${#zones[@]}-1; zone_num>0; zone_num--)); do + KUBE_GCE_ZONE="${zones[zone_num]}" KUBE_USE_EXISTING_MASTER="true" "${KUBE_ROOT}/cluster/kube-down.sh" + done + KUBE_GCE_ZONE="${zones[0]}" KUBE_USE_EXISTING_MASTER="false" "${KUBE_ROOT}/cluster/kube-down.sh" + else + "${KUBE_ROOT}/cluster/kube-down.sh" + fi +} + +# SSH to a node by name ($1) and run a command ($2). +function ssh-to-node() { + local node="$1" + local cmd="$2" + # Loop until we can successfully ssh into the box + for try in {1..5}; do + if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then + break + fi + sleep 5 + done + # Then actually try the command. 
+ gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}" +} + +# Perform preparations required to run e2e tests +function prepare-e2e() { + detect-project +} + +# Delete the image given by $1. +function delete-image() { + gcloud container images delete --quiet "$1" +} diff --git a/vendor/k8s.io/kubernetes/cluster/gce/windows/node-helper.sh b/vendor/k8s.io/kubernetes/cluster/gce/windows/node-helper.sh new file mode 100755 index 000000000..82a5fb985 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/gce/windows/node-helper.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constants for Windows nodes. + +function get-windows-node-instance-metadata-from-file { + local metadata="" + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt," + metadata+="kube-env=${KUBE_TEMP}/windows-node-kube-env.yaml," + metadata+="kubelet-config=${KUBE_TEMP}/windows-node-kubelet-config.yaml," + # To get startup script output run "gcloud compute instances + # get-serial-port-output " from the location where you're running + # kube-up. + metadata+="windows-startup-script-ps1=${KUBE_ROOT}/cluster/gce/windows/configure.ps1," + metadata+="common-psm1=${KUBE_ROOT}/cluster/gce/windows/common.psm1," + metadata+="k8s-node-setup-psm1=${KUBE_ROOT}/cluster/gce/windows/k8s-node-setup.psm1," + metadata+="install-ssh-psm1=${KUBE_ROOT}/cluster/gce/windows/testonly/install-ssh.psm1," + metadata+="user-profile-psm1=${KUBE_ROOT}/cluster/gce/windows/testonly/user-profile.psm1," + metadata+="${NODE_EXTRA_METADATA}" + echo "${metadata}" +} + +function get-windows-node-instance-metadata { + local metadata="" + metadata+="k8s-version=${KUBE_VERSION:-v1.13.2}," + metadata+="serial-port-enable=1," + # This enables logging the serial port output. + # https://cloud.google.com/compute/docs/instances/viewing-serial-port-output + metadata+="serial-port-logging-enable=true," + metadata+="win-version=${WINDOWS_NODE_OS_DISTRIBUTION}" + echo "${metadata}" +} + +# $1: template name (required). +# $2: scopes flag. 
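# Editorial example (hypothetical values, not part of the vendored script):
#   create-windows-node-instance-template "${WINDOWS_NODE_INSTANCE_PREFIX}-template" "${scope_flags}"
# where scope_flags is assumed to carry the gcloud --scopes argument assembled
# by the caller.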
+function create-windows-node-instance-template { + local template_name="$1" + local scopes_flag="$2" + create-node-template "${template_name}" "${scopes_flag}" "$(get-windows-node-instance-metadata-from-file)" "$(get-windows-node-instance-metadata)" "windows" +} diff --git a/vendor/k8s.io/kubernetes/cluster/images/kubemark/BUILD b/vendor/k8s.io/kubernetes/cluster/images/kubemark/BUILD new file mode 100644 index 000000000..b1eebb554 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/images/kubemark/BUILD @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:public"]) + +load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_push") + +container_image( + name = "image", + base = "@official_busybox//image", + entrypoint = ["/kubemark"], + files = ["//cmd/kubemark"], + stamp = True, +) + +container_push( + name = "push", + format = "Docker", + image = ":image", + registry = "$(REGISTRY)", + repository = "kubemark", + stamp = True, + tag = "$(IMAGE_TAG)", + tags = ["manual"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/cluster/images/kubemark/Dockerfile b/vendor/k8s.io/kubernetes/cluster/images/kubemark/Dockerfile new file mode 100644 index 000000000..fb4f28735 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/images/kubemark/Dockerfile @@ -0,0 +1,17 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM debian:jessie + +COPY kubemark /kubemark diff --git a/vendor/k8s.io/kubernetes/cluster/images/kubemark/Makefile b/vendor/k8s.io/kubernetes/cluster/images/kubemark/Makefile new file mode 100644 index 000000000..611e1712c --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/images/kubemark/Makefile @@ -0,0 +1,36 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# build Kubemark image from currently built binaries containing both 'real' master and Hollow Node. +# This makefile assumes that the kubemark binary is present in this directory. + +# Allow the caller to override this. Beware make's precedence. This: +# REGISTRY=$VAR make +# .. 
is not the same as: +# make REGISTRY=$VAR +REGISTRY := $(if $(REGISTRY),$(REGISTRY),staging-k8s.gcr.io) +IMAGE_TAG := $(if $(IMAGE_TAG),$(IMAGE_TAG),latest) + +all: gcloudpush + +build: + docker build --pull -t $(REGISTRY)/kubemark:$(IMAGE_TAG) . + +gcloudpush: build + docker push $(REGISTRY)/kubemark:$(IMAGE_TAG) + +push: build + docker push $(REGISTRY)/kubemark:$(IMAGE_TAG) + +.PHONY: all build gcloudpush push diff --git a/vendor/k8s.io/kubernetes/cluster/images/kubemark/OWNERS b/vendor/k8s.io/kubernetes/cluster/images/kubemark/OWNERS new file mode 100644 index 000000000..329e8801d --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/images/kubemark/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - gmarek + - shyamjvs + - wojtek-t +approvers: + - gmarek + - shyamjvs + - wojtek-t diff --git a/vendor/k8s.io/kubernetes/cluster/kube-util.sh b/vendor/k8s.io/kubernetes/cluster/kube-util.sh new file mode 100755 index 000000000..b33b4ae9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/kube-util.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script will source the default skeleton helper functions, then sources +# cluster/${KUBERNETES_PROVIDER}/util.sh where KUBERNETES_PROVIDER, if unset, +# will use its default value (gce). + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. + +source "${KUBE_ROOT}/cluster/skeleton/util.sh" + +if [[ "${KUBERNETES_PROVIDER:-}" != "kubernetes-anywhere" ]]; then + if [[ -n "${KUBERNETES_CONFORMANCE_TEST:-}" ]]; then + KUBERNETES_PROVIDER="" + else + KUBERNETES_PROVIDER="${KUBERNETES_PROVIDER:-gce}" + fi +fi + +# PROVIDER_VARS is a list of cloud provider specific variables. Note: +# this is a list of the _names_ of the variables, not the value of the +# variables. Providers can add variables to be appended to kube-env. +# (see `build-kube-env`). + +PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" +if [ -f "${PROVIDER_UTILS}" ]; then + source "${PROVIDER_UTILS}" +fi diff --git a/vendor/k8s.io/kubernetes/cluster/kubectl.sh b/vendor/k8s.io/kubernetes/cluster/kubectl.sh new file mode 100755 index 000000000..c9b478417 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/kubectl.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit +set -o nounset +set -o pipefail + +# Stop the bleeding, turn off the warning until we fix token gen. +# echo "-=-=-=-=-=-=-=-=-=-=" +# echo "NOTE:" +# echo "kubectl.sh is deprecated and will be removed soon." +# echo "please replace all usage with calls to the kubectl" +# echo "binary and ensure that it is in your PATH." +# echo "" +# echo "Please see 'kubectl help config' for more details" +# echo "about configuring kubectl for your cluster." +# echo "-=-=-=-=-=-=-=-=-=-=" + + +KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE[0]}")/..} +source "${KUBE_ROOT}/cluster/kube-util.sh" +source "${KUBE_ROOT}/cluster/clientbin.sh" + +# If KUBECTL_PATH isn't set, gather up the list of likely places and use ls +# to find the latest one. +if [[ -z "${KUBECTL_PATH:-}" ]]; then + kubectl=$( get_bin "kubectl" "cmd/kubectl" ) + + if [[ ! -x "$kubectl" ]]; then + print_error "kubectl" + exit 1 + fi +elif [[ ! -x "${KUBECTL_PATH}" ]]; then + { + echo "KUBECTL_PATH environment variable set to '${KUBECTL_PATH}', but " + echo "this doesn't seem to be a valid executable." + } >&2 + exit 1 +fi +kubectl="${KUBECTL_PATH:-${kubectl}}" + +if [[ "$KUBERNETES_PROVIDER" == "gke" ]]; then + detect-project &> /dev/null +elif [[ "$KUBERNETES_PROVIDER" == "ubuntu" ]]; then + detect-master > /dev/null + config=( + "--server=http://${KUBE_MASTER_IP}:8080" + ) +fi + + +if false; then + # disable these debugging messages by default + echo "current-context: \"$(${kubectl} "${config[@]:+${config[@]}}" config view -o template --template='{{index . "current-context"}}')\"" >&2 + echo "Running:" "${kubectl}" "${config[@]:+${config[@]}}" "${@+$@}" >&2 +fi + +if [[ "${1:-}" =~ ^(path)$ ]]; then + echo "${kubectl}" + exit 0 +fi + +"${kubectl}" "${config[@]:+${config[@]}}" "${@+$@}" + diff --git a/vendor/k8s.io/kubernetes/cluster/kubemark/OWNERS b/vendor/k8s.io/kubernetes/cluster/kubemark/OWNERS new file mode 100644 index 000000000..329e8801d --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/kubemark/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - gmarek + - shyamjvs + - wojtek-t +approvers: + - gmarek + - shyamjvs + - wojtek-t diff --git a/vendor/k8s.io/kubernetes/cluster/kubemark/gce/config-default.sh b/vendor/k8s.io/kubernetes/cluster/kubemark/gce/config-default.sh new file mode 100644 index 000000000..882657687 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/kubemark/gce/config-default.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A configuration for Kubemark cluster. It doesn't need to be kept in +# sync with gce/config-default.sh (except the filename, because I'm reusing +# gce/util.sh script which assumes config filename), but if some things that +# are enabled by default should not run in hollow clusters, they should be disabled here. + +# shellcheck disable=SC2034 # Variables sourced in other scripts. 
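# Editorial usage sketch (values are illustrative): the overrides below are
# normally supplied as environment variables when starting a kubemark cluster,
# e.g.
#   KUBEMARK_NUM_NODES=100 KUBE_GCE_ZONE=us-east1-b ./test/kubemark/start-kubemark.sh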
+ +source "${KUBE_ROOT}/cluster/gce/config-common.sh" + +GCLOUD=gcloud +ZONE=${KUBE_GCE_ZONE:-us-central1-b} +REGION=${ZONE%-*} +NUM_NODES=${KUBEMARK_NUM_NODES:-10} +NUM_WINDOWS_NODES=${KUBEMARK_NUM_WINDOWS_NODES:-0} +MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-n1-standard-$(get-master-size)} +MASTER_DISK_TYPE=pd-ssd +MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)} +MASTER_ROOT_DISK_SIZE=${KUBEMARK_MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)} +REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} +PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} +NODE_ACCELERATORS=${NODE_ACCELERATORS:-""} +CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false} +EVENT_PD=${EVENT_PD:-false} + +MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci} +NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci} +MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-beta-73-11647-64-0} +MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud} +CLEANUP_KUBEMARK_IMAGE=${CLEANUP_KUBEMARK_IMAGE:-true} + +# GPUs supported in GCE do not have compatible drivers in Debian 7. +if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then + NODE_ACCELERATORS="" +fi + +NETWORK=${KUBE_GCE_NETWORK:-e2e} +if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then + SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}" +fi +INSTANCE_PREFIX="${INSTANCE_PREFIX:-"default"}" +MASTER_NAME="${INSTANCE_PREFIX}-kubemark-master" +AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-kubemark-aggregator" +MASTER_TAG="kubemark-master" +EVENT_STORE_NAME="${INSTANCE_PREFIX}-event-store" +MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" +CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}" +RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" +TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100} +KUBE_APISERVER_REQUEST_TIMEOUT=300 +ETCD_COMPACTION_INTERVAL_SEC="${KUBEMARK_ETCD_COMPACTION_INTERVAL_SEC:-}" + +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.12-1) if you need +# non-default version. +ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" +ETCD_VERSION="${TEST_ETCD_VERSION:-}" +ETCD_SERVERS="${KUBEMARK_ETCD_SERVERS:-}" +ETCD_SERVERS_OVERRIDES="${KUBEMARK_ETCD_SERVERS_OVERRIDES:-}" + +# Storage backend. 'etcd2' and 'etcd3' are supported. +STORAGE_BACKEND=${STORAGE_BACKEND:-} +# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported. +STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-} + +# Default Log level for all components in test clusters and variables to override it in specific components. +TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}" +API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" +CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" +SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" + +HOLLOW_KUBELET_TEST_LOG_LEVEL="${HOLLOW_KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" +HOLLOW_PROXY_TEST_LOG_LEVEL="${HOLLOW_PROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}" + +TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}" +TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}" +TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}" + +# ContentType used by all components to communicate with apiserver. 
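# For example (editorial; the flag value below is illustrative, not set by this patch):
#   TEST_CLUSTER_API_CONTENT_TYPE="--kube-api-content-type=application/vnd.kubernetes.protobuf"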
+TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}" + +KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}" + +CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota}" + +# Master components' test arguments. +APISERVER_TEST_ARGS="${KUBEMARK_APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS}" +CONTROLLER_MANAGER_TEST_ARGS="${KUBEMARK_CONTROLLER_MANAGER_TEST_ARGS:-} ${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}" +SCHEDULER_TEST_ARGS="${KUBEMARK_SCHEDULER_TEST_ARGS:-} ${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}" + +# Hollow-node components' test arguments. +HOLLOW_KUBELET_TEST_ARGS="${HOLLOW_KUBELET_TEST_ARGS:-} ${HOLLOW_KUBELET_TEST_LOG_LEVEL}" +HOLLOW_PROXY_TEST_ARGS="${HOLLOW_PROXY_TEST_ARGS:-} ${HOLLOW_PROXY_TEST_LOG_LEVEL}" + +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET +ALLOCATE_NODE_CIDRS=true + +# Optional: Enable cluster autoscaler. +ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-false}" +# When using Cluster Autoscaler, always start with one hollow-node replica. +# NUM_NODES should not be specified by the user. Instead we use +# NUM_NODES=KUBEMARK_AUTOSCALER_MAX_NODES. This gives other cluster components +# (e.g. kubemark master, Heapster) enough resources to handle maximum cluster size. +if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then + NUM_REPLICAS=1 + if [[ -n "$NUM_NODES" ]]; then + echo "WARNING: Using Cluster Autoscaler, ignoring NUM_NODES parameter. Set KUBEMARK_AUTOSCALER_MAX_NODES to specify maximum size of the cluster." + fi +fi + +#Optional: Enable kube dns. +ENABLE_KUBEMARK_KUBE_DNS="${ENABLE_KUBEMARK_KUBE_DNS:-true}" +KUBE_DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}" + +# Optional: set feature gates +FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}" + +# Enable a simple "AdvancedAuditing" setup for testing. +ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-false}" + +# Optional: enable pod priority +ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}" +if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then + FEATURE_GATES="${FEATURE_GATES},PodPriority=true" +fi + +# The number of services that are allowed to sync concurrently. Will be passed +# into kube-controller-manager via `--concurrent-service-syncs` +CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}" diff --git a/vendor/k8s.io/kubernetes/cluster/kubemark/util.sh b/vendor/k8s.io/kubernetes/cluster/kubemark/util.sh new file mode 100644 index 000000000..52ecbdf5d --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/kubemark/util.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. +source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh" +source "${KUBE_ROOT}/cluster/${CLOUD_PROVIDER}/util.sh" +source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh" diff --git a/vendor/k8s.io/kubernetes/cluster/skeleton/util.sh b/vendor/k8s.io/kubernetes/cluster/skeleton/util.sh new file mode 100644 index 000000000..d43d57a7a --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/skeleton/util.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script contains the helper functions that each provider hosting +# Kubernetes must implement to use cluster/kube-*.sh scripts. + +# Must ensure that the following ENV vars are set +function detect-master { + echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP:-}" 1>&2 + echo "KUBE_MASTER: ${KUBE_MASTER:-}" 1>&2 +} + +# Get node names if they are not static. +function detect-node-names { + echo "NODE_NAMES: [${NODE_NAMES[*]}]" 1>&2 +} + +# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[] +function detect-nodes { + echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2 +} + +# Verify prereqs on host machine +function verify-prereqs { + echo "Skeleton Provider: verify-prereqs not implemented" 1>&2 +} + +# Validate a kubernetes cluster +function validate-cluster { + # by default call the generic validate-cluster.sh script, customizable by + # any cluster provider if this does not fit. 
+ "${KUBE_ROOT}/cluster/validate-cluster.sh" +} + +# Instantiate a kubernetes cluster +function kube-up { + echo "Skeleton Provider: kube-up not implemented" 1>&2 +} + +# Delete a kubernetes cluster +function kube-down { + echo "Skeleton Provider: kube-down not implemented" 1>&2 +} + +# Execute prior to running tests to build a release if required for env +function test-build-release { + echo "Skeleton Provider: test-build-release not implemented" 1>&2 +} + +# Execute prior to running tests to initialize required structure +function test-setup { + echo "Skeleton Provider: test-setup not implemented" 1>&2 +} + +# Execute after running tests to perform any required clean-up +function test-teardown { + echo "Skeleton Provider: test-teardown not implemented" 1>&2 +} + +function prepare-e2e { + echo "Skeleton Provider: prepare-e2e not implemented" 1>&2 +} + +function detect-project { + echo "Skeleton Provider: detect-project not implemented" 1>&2 +} diff --git a/vendor/k8s.io/kubernetes/cluster/windows/BUILD b/vendor/k8s.io/kubernetes/cluster/windows/BUILD new file mode 100644 index 000000000..158aef837 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/windows/BUILD @@ -0,0 +1,35 @@ +load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup") + +# Having the Windows code from the GCE cluster deploy hosted with the release is +# useful for GKE. This list should match the list in k8s.io/release/lib/releaselib.sh. + +# IMPORTANT PLEASE NOTE: +# Any time the file structure in the `windows` directory changes, `windows/BUILD` +# and `k8s.io/release/lib/releaselib.sh` must be manually updated with the changes. +# We HIGHLY recommend not changing the file structure, because consumers of +# Kubernetes releases depend on the release structure remaining stable. +release_filegroup( + name = "gcs-release-artifacts", + srcs = [ + "common.psm1", + "configure.ps1", + "k8s-node-setup.psm1", + "testonly/install-ssh.psm1", + "testonly/user-profile.psm1", + ], + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/cluster/windows/OWNERS b/vendor/k8s.io/kubernetes/cluster/windows/OWNERS new file mode 100644 index 000000000..04490edb6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/windows/OWNERS @@ -0,0 +1,2 @@ +approvers: +- yujuhong diff --git a/vendor/k8s.io/kubernetes/cluster/windows/node-helper.sh b/vendor/k8s.io/kubernetes/cluster/windows/node-helper.sh new file mode 100755 index 000000000..82a5fb985 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cluster/windows/node-helper.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions and constants for Windows nodes. 
+ +function get-windows-node-instance-metadata-from-file { + local metadata="" + metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt," + metadata+="kube-env=${KUBE_TEMP}/windows-node-kube-env.yaml," + metadata+="kubelet-config=${KUBE_TEMP}/windows-node-kubelet-config.yaml," + # To get startup script output run "gcloud compute instances + # get-serial-port-output " from the location where you're running + # kube-up. + metadata+="windows-startup-script-ps1=${KUBE_ROOT}/cluster/gce/windows/configure.ps1," + metadata+="common-psm1=${KUBE_ROOT}/cluster/gce/windows/common.psm1," + metadata+="k8s-node-setup-psm1=${KUBE_ROOT}/cluster/gce/windows/k8s-node-setup.psm1," + metadata+="install-ssh-psm1=${KUBE_ROOT}/cluster/gce/windows/testonly/install-ssh.psm1," + metadata+="user-profile-psm1=${KUBE_ROOT}/cluster/gce/windows/testonly/user-profile.psm1," + metadata+="${NODE_EXTRA_METADATA}" + echo "${metadata}" +} + +function get-windows-node-instance-metadata { + local metadata="" + metadata+="k8s-version=${KUBE_VERSION:-v1.13.2}," + metadata+="serial-port-enable=1," + # This enables logging the serial port output. + # https://cloud.google.com/compute/docs/instances/viewing-serial-port-output + metadata+="serial-port-logging-enable=true," + metadata+="win-version=${WINDOWS_NODE_OS_DISTRIBUTION}" + echo "${metadata}" +} + +# $1: template name (required). +# $2: scopes flag. +function create-windows-node-instance-template { + local template_name="$1" + local scopes_flag="$2" + create-node-template "${template_name}" "${scopes_flag}" "$(get-windows-node-instance-metadata-from-file)" "$(get-windows-node-instance-metadata)" "windows" +} diff --git a/vendor/k8s.io/kubernetes/hack/lib/BUILD b/vendor/k8s.io/kubernetes/hack/lib/BUILD new file mode 100644 index 000000000..bfce52eb2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +sh_library( + name = "lib", + srcs = [ + "etcd.sh", + "golang.sh", + "init.sh", + "logging.sh", + "swagger.sh", + "test.sh", + "util.sh", + "version.sh", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/hack/lib/etcd.sh b/vendor/k8s.io/kubernetes/hack/lib/etcd.sh new file mode 100755 index 000000000..79821e7d9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/etcd.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# A set of helpers for starting/running etcd for tests + +ETCD_VERSION=${ETCD_VERSION:-3.3.10} +ETCD_HOST=${ETCD_HOST:-127.0.0.1} +ETCD_PORT=${ETCD_PORT:-2379} +export KUBE_INTEGRATION_ETCD_URL="http://${ETCD_HOST}:${ETCD_PORT}" + +kube::etcd::validate() { + # validate if in path + command -v etcd >/dev/null || { + kube::log::usage "etcd must be in your PATH" + kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/." + exit 1 + } + + # validate etcd port is free + local port_check_command + if command -v ss &> /dev/null && ss -Version | grep 'iproute2' &> /dev/null; then + port_check_command="ss" + elif command -v netstat &>/dev/null; then + port_check_command="netstat" + else + kube::log::usage "unable to identify if etcd is bound to port ${ETCD_PORT}. unable to find ss or netstat utilities." + exit 1 + fi + if ${port_check_command} -nat | grep "LISTEN" | grep "[\.:]${ETCD_PORT:?}" >/dev/null 2>&1; then + kube::log::usage "unable to start etcd as port ${ETCD_PORT} is in use. please stop the process listening on this port and retry." + kube::log::usage "$(netstat -nat | grep "[\.:]${ETCD_PORT:?} .*LISTEN")" + exit 1 + fi + + # validate installed version is at least equal to minimum + version=$(etcd --version | tail -n +1 | head -n 1 | cut -d " " -f 3) + if [[ $(kube::etcd::version "${ETCD_VERSION}") -gt $(kube::etcd::version "${version}") ]]; then + export PATH=${KUBE_ROOT}/third_party/etcd:${PATH} + hash etcd + echo "${PATH}" + version=$(etcd --version | head -n 1 | cut -d " " -f 3) + if [[ $(kube::etcd::version "${ETCD_VERSION}") -gt $(kube::etcd::version "${version}") ]]; then + kube::log::usage "etcd version ${ETCD_VERSION} or greater required." + kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/." + exit 1 + fi + fi +} + +kube::etcd::version() { + printf '%s\n' "${@}" | awk -F . '{ printf("%d%03d%03d\n", $1, $2, $3) }' +} + +kube::etcd::start() { + # validate before running + kube::etcd::validate + + # Start etcd + ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)} + if [[ -d "${ARTIFACTS:-}" ]]; then + ETCD_LOGFILE="${ARTIFACTS}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$" + else + ETCD_LOGFILE=${ETCD_LOGFILE:-"/dev/null"} + fi + kube::log::info "etcd --advertise-client-urls ${KUBE_INTEGRATION_ETCD_URL} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug > \"${ETCD_LOGFILE}\" 2>/dev/null" + etcd --advertise-client-urls "${KUBE_INTEGRATION_ETCD_URL}" --data-dir "${ETCD_DIR}" --listen-client-urls "${KUBE_INTEGRATION_ETCD_URL}" --debug 2> "${ETCD_LOGFILE}" >/dev/null & + ETCD_PID=$! + + echo "Waiting for etcd to come up." 
+ kube::util::wait_for_url "${KUBE_INTEGRATION_ETCD_URL}/v2/machines" "etcd: " 0.25 80 + curl -fs -X PUT "${KUBE_INTEGRATION_ETCD_URL}/v2/keys/_test" +} + +kube::etcd::stop() { + if [[ -n "${ETCD_PID-}" ]]; then + kill "${ETCD_PID}" &>/dev/null || : + wait "${ETCD_PID}" &>/dev/null || : + fi +} + +kube::etcd::clean_etcd_dir() { + if [[ -n "${ETCD_DIR-}" ]]; then + rm -rf "${ETCD_DIR}" + fi +} + +kube::etcd::cleanup() { + kube::etcd::stop + kube::etcd::clean_etcd_dir +} + +kube::etcd::install() { + ( + local os + local arch + + os=$(kube::util::host_os) + arch=$(kube::util::host_arch) + + cd "${KUBE_ROOT}/third_party" || return 1 + if [[ $(readlink etcd) == etcd-v${ETCD_VERSION}-${os}-* ]]; then + kube::log::info "etcd v${ETCD_VERSION} already installed at path:" + kube::log::info "$(pwd)/$(readlink etcd)" + return # already installed + fi + + if [[ ${os} == "darwin" ]]; then + download_file="etcd-v${ETCD_VERSION}-darwin-amd64.zip" + url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${download_file}" + kube::util::download_file "${url}" "${download_file}" + unzip -o "${download_file}" + ln -fns "etcd-v${ETCD_VERSION}-darwin-amd64" etcd + rm "${download_file}" + else + url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-${arch}.tar.gz" + download_file="etcd-v${ETCD_VERSION}-linux-${arch}.tar.gz" + kube::util::download_file "${url}" "${download_file}" + tar xzf "${download_file}" + ln -fns "etcd-v${ETCD_VERSION}-linux-${arch}" etcd + rm "${download_file}" + fi + kube::log::info "etcd v${ETCD_VERSION} installed. To use:" + kube::log::info "export PATH=$(pwd)/etcd:\${PATH}" + ) +} diff --git a/vendor/k8s.io/kubernetes/hack/lib/golang.sh b/vendor/k8s.io/kubernetes/hack/lib/golang.sh new file mode 100755 index 000000000..22a3bc2be --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/golang.sh @@ -0,0 +1,770 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The golang package that we are building. +readonly KUBE_GO_PACKAGE=k8s.io/kubernetes +readonly KUBE_GOPATH="${KUBE_OUTPUT}/go" + +# The set of server targets that we are only building for Linux +# If you update this list, please also update build/BUILD. 
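# Editorial aside (illustrative, not part of the vendored script): the target
# paths listed below are reduced to bare binary names with the "${ARR[@]##*/}"
# expansion used throughout this file, e.g.
#   targets=(cmd/kube-proxy cmd/kubelet)
#   echo "${targets[@]##*/}"    # -> kube-proxy kubelet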
+kube::golang::server_targets() { + local targets=( + cmd/kube-proxy + cmd/kube-apiserver + cmd/kube-controller-manager + cmd/cloud-controller-manager + cmd/kubelet + cmd/kubeadm + cmd/hyperkube + cmd/kube-scheduler + vendor/k8s.io/apiextensions-apiserver + cluster/gce/gci/mounter + ) + echo "${targets[@]}" +} + +IFS=" " read -ra KUBE_SERVER_TARGETS <<< "$(kube::golang::server_targets)" +readonly KUBE_SERVER_TARGETS +readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}") + +# The set of server targets we build docker images for +kube::golang::server_image_targets() { + # NOTE: this contains cmd targets for kube::build::get_docker_wrapped_binaries + local targets=( + cmd/cloud-controller-manager + cmd/kube-apiserver + cmd/kube-controller-manager + cmd/kube-scheduler + cmd/kube-proxy + ) + echo "${targets[@]}" +} + +IFS=" " read -ra KUBE_SERVER_IMAGE_TARGETS <<< "$(kube::golang::server_image_targets)" +readonly KUBE_SERVER_IMAGE_TARGETS +readonly KUBE_SERVER_IMAGE_BINARIES=("${KUBE_SERVER_IMAGE_TARGETS[@]##*/}") + +# The set of conformance targets we build docker image for +kube::golang::conformance_image_targets() { + # NOTE: this contains cmd targets for kube::release::build_conformance_image + local targets=( + vendor/github.com/onsi/ginkgo/ginkgo + test/e2e/e2e.test + cmd/kubectl + ) + echo "${targets[@]}" +} + +IFS=" " read -ra KUBE_CONFORMANCE_IMAGE_TARGETS <<< "$(kube::golang::conformance_image_targets)" +readonly KUBE_CONFORMANCE_IMAGE_TARGETS + +# The set of server targets that we are only building for Kubernetes nodes +# If you update this list, please also update build/BUILD. +kube::golang::node_targets() { + local targets=( + cmd/kube-proxy + cmd/kubeadm + cmd/kubelet + ) + echo "${targets[@]}" +} + +IFS=" " read -ra KUBE_NODE_TARGETS <<< "$(kube::golang::node_targets)" +readonly KUBE_NODE_TARGETS +readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}") +readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}") + +if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then + IFS=" " read -ra KUBE_SERVER_PLATFORMS <<< "${KUBE_BUILD_PLATFORMS}" + IFS=" " read -ra KUBE_NODE_PLATFORMS <<< "${KUBE_BUILD_PLATFORMS}" + IFS=" " read -ra KUBE_TEST_PLATFORMS <<< "${KUBE_BUILD_PLATFORMS}" + IFS=" " read -ra KUBE_CLIENT_PLATFORMS <<< "${KUBE_BUILD_PLATFORMS}" + readonly KUBE_SERVER_PLATFORMS + readonly KUBE_NODE_PLATFORMS + readonly KUBE_TEST_PLATFORMS + readonly KUBE_CLIENT_PLATFORMS +elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then + readonly KUBE_SERVER_PLATFORMS=(linux/amd64) + readonly KUBE_NODE_PLATFORMS=(linux/amd64) + if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then + readonly KUBE_TEST_PLATFORMS=( + darwin/amd64 + linux/amd64 + ) + readonly KUBE_CLIENT_PLATFORMS=( + darwin/amd64 + linux/amd64 + ) + else + readonly KUBE_TEST_PLATFORMS=(linux/amd64) + readonly KUBE_CLIENT_PLATFORMS=(linux/amd64) + fi +else + + # The server platform we are building on. 
+ readonly KUBE_SERVER_PLATFORMS=( + linux/amd64 + linux/arm + linux/arm64 + linux/s390x + linux/ppc64le + ) + + # The node platforms we build for + readonly KUBE_NODE_PLATFORMS=( + linux/amd64 + linux/arm + linux/arm64 + linux/s390x + linux/ppc64le + windows/amd64 + ) + + # If we update this we should also update the set of platforms whose standard library is precompiled for in build/build-image/cross/Dockerfile + readonly KUBE_CLIENT_PLATFORMS=( + linux/amd64 + linux/386 + linux/arm + linux/arm64 + linux/s390x + linux/ppc64le + darwin/amd64 + darwin/386 + windows/amd64 + windows/386 + ) + + # Which platforms we should compile test targets for. Not all client platforms need these tests + readonly KUBE_TEST_PLATFORMS=( + linux/amd64 + linux/arm + linux/arm64 + linux/s390x + linux/ppc64le + darwin/amd64 + windows/amd64 + ) +fi + +# The set of client targets that we are building for all platforms +# If you update this list, please also update build/BUILD. +readonly KUBE_CLIENT_TARGETS=( + cmd/kubectl +) +readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}") +readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}") + +# The set of test targets that we are building for all platforms +# If you update this list, please also update build/BUILD. +kube::golang::test_targets() { + local targets=( + cmd/gendocs + cmd/genkubedocs + cmd/genman + cmd/genyaml + cmd/genswaggertypedocs + cmd/linkcheck + vendor/github.com/onsi/ginkgo/ginkgo + test/e2e/e2e.test + ) + echo "${targets[@]}" +} +IFS=" " read -ra KUBE_TEST_TARGETS <<< "$(kube::golang::test_targets)" +readonly KUBE_TEST_TARGETS +readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}") +readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}") +# If you update this list, please also update build/BUILD. +readonly KUBE_TEST_PORTABLE=( + test/e2e/testing-manifests + test/kubemark + hack/e2e.go + hack/e2e-internal + hack/get-build.sh + hack/ginkgo-e2e.sh + hack/lib +) + +# Test targets which run on the Kubernetes clusters directly, so we only +# need to target server platforms. +# These binaries will be distributed in the kubernetes-test tarball. +# If you update this list, please also update build/BUILD. +kube::golang::server_test_targets() { + local targets=( + cmd/kubemark + vendor/github.com/onsi/ginkgo/ginkgo + ) + + if [[ "${OSTYPE:-}" == "linux"* ]]; then + targets+=( test/e2e_node/e2e_node.test ) + fi + + echo "${targets[@]}" +} + +IFS=" " read -ra KUBE_TEST_SERVER_TARGETS <<< "$(kube::golang::server_test_targets)" +readonly KUBE_TEST_SERVER_TARGETS +readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}") +readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}") + +# Gigabytes necessary for parallel platform builds. +# As of January 2018, RAM usage is exceeding 30G +# Setting to 40 to provide some headroom +readonly KUBE_PARALLEL_BUILD_MEMORY=40 + +readonly KUBE_ALL_TARGETS=( + "${KUBE_SERVER_TARGETS[@]}" + "${KUBE_CLIENT_TARGETS[@]}" + "${KUBE_TEST_TARGETS[@]}" + "${KUBE_TEST_SERVER_TARGETS[@]}" +) +readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}") + +readonly KUBE_STATIC_LIBRARIES=( + cloud-controller-manager + kube-apiserver + kube-controller-manager + kube-scheduler + kube-proxy + kubeadm + kubectl +) + +# Fully-qualified package names that we want to instrument for coverage information. 
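# Editorial note (hedged): coverage instrumentation is opt-in; an instrumented
# build is requested by setting the variable checked in
# kube::golang::build_some_binaries further below, e.g.
#   KUBE_BUILD_WITH_COVERAGE=y make WHAT=cmd/kube-apiserver
# (the make invocation shown is an assumption about the usual entry point, not
# something defined in this patch).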
+readonly KUBE_COVERAGE_INSTRUMENTED_PACKAGES=( + k8s.io/kubernetes/cmd/kube-apiserver + k8s.io/kubernetes/cmd/kube-controller-manager + k8s.io/kubernetes/cmd/kube-scheduler + k8s.io/kubernetes/cmd/kube-proxy + k8s.io/kubernetes/cmd/kubelet +) + +# KUBE_CGO_OVERRIDES is a space-separated list of binaries which should be built +# with CGO enabled, assuming CGO is supported on the target platform. +# This overrides any entry in KUBE_STATIC_LIBRARIES. +IFS=" " read -ra KUBE_CGO_OVERRIDES <<< "${KUBE_CGO_OVERRIDES:-}" +readonly KUBE_CGO_OVERRIDES +# KUBE_STATIC_OVERRIDES is a space-separated list of binaries which should be +# built with CGO disabled. This is in addition to the list in +# KUBE_STATIC_LIBRARIES. +IFS=" " read -ra KUBE_STATIC_OVERRIDES <<< "${KUBE_STATIC_OVERRIDES:-}" +readonly KUBE_STATIC_OVERRIDES + +kube::golang::is_statically_linked_library() { + local e + # Explicitly enable cgo when building kubectl for darwin from darwin. + [[ "$(go env GOHOSTOS)" == "darwin" && "$(go env GOOS)" == "darwin" && + "$1" == *"/kubectl" ]] && return 1 + if [[ -n "${KUBE_CGO_OVERRIDES:+x}" ]]; then + for e in "${KUBE_CGO_OVERRIDES[@]}"; do [[ "${1}" == *"/${e}" ]] && return 1; done; + fi + for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "${1}" == *"/${e}" ]] && return 0; done; + if [[ -n "${KUBE_STATIC_OVERRIDES:+x}" ]]; then + for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "${1}" == *"/${e}" ]] && return 0; done; + fi + return 1; +} + +# kube::binaries_from_targets take a list of build targets and return the +# full go package to be built +kube::golang::binaries_from_targets() { + local target + for target; do + # If the target starts with what looks like a domain name, assume it has a + # fully-qualified package name rather than one that needs the Kubernetes + # package prepended. + if [[ "${target}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then + echo "${target}" + else + echo "${KUBE_GO_PACKAGE}/${target}" + fi + done +} + +# Asks golang what it thinks the host platform is. The go tool chain does some +# slightly different things when the target platform matches the host platform. +kube::golang::host_platform() { + echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" +} + +# Takes the platform name ($1) and sets the appropriate golang env variables +# for that platform. +kube::golang::set_platform_envs() { + [[ -n ${1-} ]] || { + kube::log::error_exit "!!! Internal error. 
No platform set in kube::golang::set_platform_envs" + } + + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64 + if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then + # Dynamic CGO linking for other server architectures than linux/amd64 goes here + # If you want to include support for more server platforms than these, add arch-specific gcc names here + case "${platform}" in + "linux/arm") + export CGO_ENABLED=1 + export CC=arm-linux-gnueabihf-gcc + ;; + "linux/arm64") + export CGO_ENABLED=1 + export CC=aarch64-linux-gnu-gcc + ;; + "linux/ppc64le") + export CGO_ENABLED=1 + export CC=powerpc64le-linux-gnu-gcc + ;; + "linux/s390x") + export CGO_ENABLED=1 + export CC=s390x-linux-gnu-gcc + ;; + esac + fi +} + +kube::golang::unset_platform_envs() { + unset GOOS + unset GOARCH + unset GOROOT + unset CGO_ENABLED + unset CC +} + +# Create the GOPATH tree under $KUBE_OUTPUT +kube::golang::create_gopath_tree() { + local go_pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}" + local go_pkg_basedir + go_pkg_basedir=$(dirname "${go_pkg_dir}") + + mkdir -p "${go_pkg_basedir}" + + # TODO: This symlink should be relative. + if [[ ! -e "${go_pkg_dir}" || "$(readlink "${go_pkg_dir}")" != "${KUBE_ROOT}" ]]; then + ln -snf "${KUBE_ROOT}" "${go_pkg_dir}" + fi + + # Using bazel with a recursive target (e.g. bazel test ...) will abort due to + # the symlink loop created in this function, so create this special file which + # tells bazel not to follow the symlink. + touch "${go_pkg_basedir}/DONT_FOLLOW_SYMLINKS_WHEN_TRAVERSING_THIS_DIRECTORY_VIA_A_RECURSIVE_TARGET_PATTERN" + # Additionally, the //:package-srcs glob recursively includes all + # subdirectories, and similarly fails due to the symlink loop. By creating a + # BUILD.bazel file, we effectively create a dummy package, which stops the + # glob from descending further into the tree and hitting the loop. + cat >"${KUBE_GOPATH}/BUILD.bazel" <` only works for a single pkg. + local subdir + subdir=$(kube::realpath . | sed "s|${KUBE_ROOT}||") + cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}" || return 1 + + # Set GOROOT so binaries that parse code can work properly. + GOROOT=$(go env GOROOT) + export GOROOT + + # Unset GOBIN in case it already exists in the current session. + unset GOBIN + + # This seems to matter to some tools (godep, ginkgo...) + export GO15VENDOREXPERIMENT=1 +} + +# This will take binaries from $GOPATH/bin and copy them to the appropriate +# place in ${KUBE_OUTPUT_BINDIR} +# +# Ideally this wouldn't be necessary and we could just set GOBIN to +# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go +# install' will place binaries that match the host platform directly in $GOBIN +# while placing cross compiled binaries into `platform_arch` subdirs. This +# complicates pretty much everything else we do around packaging and such. +kube::golang::place_bins() { + local host_platform + host_platform=$(kube::golang::host_platform) + + V=2 kube::log::status "Placing binaries" + + local platform + for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do + # The substitution on platform_src below will replace all slashes with + # underscores. It'll transform darwin/amd64 -> darwin_amd64. 
+ local platform_src="/${platform//\//_}" + if [[ "${platform}" == "${host_platform}" ]]; then + platform_src="" + rm -f "${THIS_PLATFORM_BIN}" + ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}" + fi + + local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}" + if [[ -d "${full_binpath_src}" ]]; then + mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}" + find "${full_binpath_src}" -maxdepth 1 -type f -exec \ + rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \; + fi + done +} + +# Try and replicate the native binary placement of go install without +# calling go install. +kube::golang::outfile_for_binary() { + local binary=$1 + local platform=$2 + local output_path="${KUBE_GOPATH}/bin" + local bin + bin=$(basename "${binary}") + if [[ "${platform}" != "${host_platform}" ]]; then + output_path="${output_path}/${platform//\//_}" + fi + if [[ ${GOOS} == "windows" ]]; then + bin="${bin}.exe" + fi + echo "${output_path}/${bin}" +} + +# Argument: the name of a Kubernetes package. +# Returns 0 if the binary can be built with coverage, 1 otherwise. +# NB: this ignores whether coverage is globally enabled or not. +kube::golang::is_instrumented_package() { + kube::util::array_contains "$1" "${KUBE_COVERAGE_INSTRUMENTED_PACKAGES[@]}" + return $? +} + +# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler) +# Echos the path to a dummy test used for coverage information. +kube::golang::path_for_coverage_dummy_test() { + local package="$1" + local path="${KUBE_GOPATH}/src/${package}" + local name + name=$(basename "${package}") + echo "${path}/zz_generated_${name}_test.go" +} + +# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler). +# Creates a dummy unit test on disk in the source directory for the given package. +# This unit test will invoke the package's standard entry point when run. +kube::golang::create_coverage_dummy_test() { + local package="$1" + local name + name="$(basename "${package}")" + cat < "$(kube::golang::path_for_coverage_dummy_test "${package}")" +package main +import ( + "testing" + "k8s.io/kubernetes/pkg/util/coverage" +) + +func TestMain(m *testing.M) { + // Get coverage running + coverage.InitCoverage("${name}") + + // Go! + main() + + // Make sure we actually write the profiling information to disk, if we make it here. + // On long-running services, or anything that calls os.Exit(), this is insufficient, + // so we also flush periodically with a default period of five seconds (configurable by + // the KUBE_COVERAGE_FLUSH_INTERVAL environment variable). + coverage.FlushCoverage() +} +EOF +} + +# Argument: the name of a Kubernetes package (e.g. k8s.io/kubernetes/cmd/kube-scheduler). +# Deletes a test generated by kube::golang::create_coverage_dummy_test. +# It is not an error to call this for a nonexistent test. +kube::golang::delete_coverage_dummy_test() { + local package="$1" + rm -f "$(kube::golang::path_for_coverage_dummy_test "${package}")" +} + +# Arguments: a list of kubernetes packages to build. +# Expected variables: ${build_args} should be set to an array of Go build arguments. +# In addition, ${package} and ${platform} should have been set earlier, and if +# ${KUBE_BUILD_WITH_COVERAGE} is set, coverage instrumentation will be enabled. +# +# Invokes Go to actually build some packages. If coverage is disabled, simply invokes +# go install. 
If coverage is enabled, builds covered binaries using go test, temporarily +# producing the required unit test files and then cleaning up after itself. +# Non-covered binaries are then built using go install as usual. +kube::golang::build_some_binaries() { + if [[ -n "${KUBE_BUILD_WITH_COVERAGE:-}" ]]; then + local -a uncovered=() + for package in "$@"; do + if kube::golang::is_instrumented_package "${package}"; then + V=2 kube::log::info "Building ${package} with coverage..." + + kube::golang::create_coverage_dummy_test "${package}" + kube::util::trap_add "kube::golang::delete_coverage_dummy_test \"${package}\"" EXIT + + go test -c -o "$(kube::golang::outfile_for_binary "${package}" "${platform}")" \ + -covermode count \ + -coverpkg k8s.io/...,k8s.io/kubernetes/vendor/k8s.io/... \ + "${build_args[@]}" \ + -tags coverage \ + "${package}" + else + uncovered+=("${package}") + fi + done + if [[ "${#uncovered[@]}" != 0 ]]; then + V=2 kube::log::info "Building ${uncovered[*]} without coverage..." + go install "${build_args[@]}" "${uncovered[@]}" + else + V=2 kube::log::info "Nothing to build without coverage." + fi + else + V=2 kube::log::info "Coverage is disabled." + go install "${build_args[@]}" "$@" + fi +} + +kube::golang::build_binaries_for_platform() { + local platform=$1 + + local -a statics=() + local -a nonstatics=() + local -a tests=() + + V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-}" + + for binary in "${binaries[@]}"; do + if [[ "${binary}" =~ ".test"$ ]]; then + tests+=("${binary}") + elif kube::golang::is_statically_linked_library "${binary}"; then + statics+=("${binary}") + else + nonstatics+=("${binary}") + fi + done + + local -a build_args + if [[ "${#statics[@]}" != 0 ]]; then + build_args=( + -installsuffix static + ${goflags:+"${goflags[@]}"} + -gcflags "${gogcflags:-}" + -asmflags "${goasmflags:-}" + -ldflags "${goldflags:-}" + ) + CGO_ENABLED=0 kube::golang::build_some_binaries "${statics[@]}" + fi + + if [[ "${#nonstatics[@]}" != 0 ]]; then + build_args=( + ${goflags:+"${goflags[@]}"} + -gcflags "${gogcflags:-}" + -asmflags "${goasmflags:-}" + -ldflags "${goldflags:-}" + ) + kube::golang::build_some_binaries "${nonstatics[@]}" + fi + + for test in "${tests[@]:+${tests[@]}}"; do + local outfile testpkg + outfile=$(kube::golang::outfile_for_binary "${test}" "${platform}") + testpkg=$(dirname "${test}") + + mkdir -p "$(dirname "${outfile}")" + go test -c \ + ${goflags:+"${goflags[@]}"} \ + -gcflags "${gogcflags:-}" \ + -asmflags "${goasmflags:-}" \ + -ldflags "${goldflags:-}" \ + -o "${outfile}" \ + "${testpkg}" + done +} + +# Return approximate physical memory available in gigabytes. +kube::golang::get_physmem() { + local mem + + # Linux kernel version >=3.14, in kb + if mem=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then + echo $(( mem / 1048576 )) + return + fi + + # Linux, in kb + if mem=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then + echo $(( mem / 1048576 )) + return + fi + + # OS X, in bytes. Note that get_physmem, as used, should only ever + # run in a Linux container (because it's only used in the multiple + # platform case, which is a Dockerized build), but this is provided + # for completeness. + if mem=$(sysctl -n hw.memsize 2>/dev/null); then + echo $(( mem / 1073741824 )) + return + fi + + # If we can't infer it, just give up and assume a low memory system + echo 1 +} + +# Build binaries targets specified +# +# Input: +# $@ - targets and go flags. 
If no targets are set then all binaries targets +# are built. +# KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +kube::golang::build_binaries() { + # Create a sub-shell so that we don't pollute the outer environment + ( + # Check for `go` binary and set ${GOPATH}. + kube::golang::setup_env + V=2 kube::log::info "Go version: $(go version)" + + local host_platform + host_platform=$(kube::golang::host_platform) + + local goflags goldflags goasmflags gogcflags + goldflags="${GOLDFLAGS:-} -s -w $(kube::version::ldflags)" + goasmflags="-trimpath=${KUBE_ROOT}" + gogcflags="${GOGCFLAGS:-} -trimpath=${KUBE_ROOT}" + + local -a targets=() + local arg + + for arg; do + if [[ "${arg}" == -* ]]; then + # Assume arguments starting with a dash are flags to pass to go. + goflags+=("${arg}") + else + targets+=("${arg}") + fi + done + + if [[ ${#targets[@]} -eq 0 ]]; then + targets=("${KUBE_ALL_TARGETS[@]}") + fi + + local -a platforms + IFS=" " read -ra platforms <<< "${KUBE_BUILD_PLATFORMS:-}" + if [[ ${#platforms[@]} -eq 0 ]]; then + platforms=("${host_platform}") + fi + + local -a binaries + while IFS="" read -r binary; do binaries+=("$binary"); done < <(kube::golang::binaries_from_targets "${targets[@]}") + + local parallel=false + if [[ ${#platforms[@]} -gt 1 ]]; then + local gigs + gigs=$(kube::golang::get_physmem) + + if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then + kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel" + parallel=true + else + kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial" + parallel=false + fi + fi + + if [[ "${parallel}" == "true" ]]; then + kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}" + local platform + for platform in "${platforms[@]}"; do ( + kube::golang::set_platform_envs "${platform}" + kube::log::status "${platform}: build started" + kube::golang::build_binaries_for_platform "${platform}" + kube::log::status "${platform}: build finished" + ) &> "/tmp//${platform//\//_}.build" & + done + + local fails=0 + for job in $(jobs -p); do + wait "${job}" || (( fails+=1 )) + done + + for platform in "${platforms[@]}"; do + cat "/tmp//${platform//\//_}.build" + done + + exit ${fails} + else + for platform in "${platforms[@]}"; do + kube::log::status "Building go targets for ${platform}:" "${targets[@]}" + ( + kube::golang::set_platform_envs "${platform}" + kube::golang::build_binaries_for_platform "${platform}" + ) + done + fi + ) +} diff --git a/vendor/k8s.io/kubernetes/hack/lib/init.sh b/vendor/k8s.io/kubernetes/hack/lib/init.sh new file mode 100755 index 000000000..f169a609b --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/init.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# Unset CDPATH so that path interpolation can work correctly +# https://github.com/kubernetes/kubernetes/issues/52255 +unset CDPATH + +# The root of the build/dist directory +KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)" + +KUBE_OUTPUT_SUBPATH="${KUBE_OUTPUT_SUBPATH:-_output/local}" +KUBE_OUTPUT="${KUBE_ROOT}/${KUBE_OUTPUT_SUBPATH}" +KUBE_OUTPUT_BINPATH="${KUBE_OUTPUT}/bin" + +# This controls rsync compression. Set to a value > 0 to enable rsync +# compression for build container +KUBE_RSYNC_COMPRESS="${KUBE_RSYNC_COMPRESS:-0}" + +# Set no_proxy for localhost if behind a proxy, otherwise, +# the connections to localhost in scripts will time out +export no_proxy=127.0.0.1,localhost + +# This is a symlink to binaries for "this platform", e.g. build tools. +THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin" + +source "${KUBE_ROOT}/hack/lib/util.sh" +source "${KUBE_ROOT}/hack/lib/logging.sh" + +kube::log::install_errexit + +source "${KUBE_ROOT}/hack/lib/version.sh" +source "${KUBE_ROOT}/hack/lib/golang.sh" +source "${KUBE_ROOT}/hack/lib/etcd.sh" + +KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)" + +# list of all available group versions. This should be used when generated code +# or when starting an API server that you want to have everything. +# most preferred version for a group should appear first +KUBE_AVAILABLE_GROUP_VERSIONS="${KUBE_AVAILABLE_GROUP_VERSIONS:-\ +v1 \ +admissionregistration.k8s.io/v1beta1 \ +admission.k8s.io/v1beta1 \ +apps/v1 \ +apps/v1beta1 \ +apps/v1beta2 \ +auditregistration.k8s.io/v1alpha1 \ +authentication.k8s.io/v1 \ +authentication.k8s.io/v1beta1 \ +authorization.k8s.io/v1 \ +authorization.k8s.io/v1beta1 \ +autoscaling/v1 \ +autoscaling/v2beta1 \ +autoscaling/v2beta2 \ +batch/v1 \ +batch/v1beta1 \ +batch/v2alpha1 \ +certificates.k8s.io/v1beta1 \ +coordination.k8s.io/v1beta1 \ +coordination.k8s.io/v1 \ +extensions/v1beta1 \ +events.k8s.io/v1beta1 \ +imagepolicy.k8s.io/v1alpha1 \ +networking.k8s.io/v1 \ +networking.k8s.io/v1beta1 \ +node.k8s.io/v1alpha1 \ +node.k8s.io/v1beta1 \ +policy/v1beta1 \ +rbac.authorization.k8s.io/v1 \ +rbac.authorization.k8s.io/v1beta1 \ +rbac.authorization.k8s.io/v1alpha1 \ +scheduling.k8s.io/v1alpha1 \ +scheduling.k8s.io/v1beta1 \ +scheduling.k8s.io/v1 \ +settings.k8s.io/v1alpha1 \ +storage.k8s.io/v1beta1 \ +storage.k8s.io/v1 \ +storage.k8s.io/v1alpha1 \ +}" + +# not all group versions are exposed by the server. This list contains those +# which are not available so we don't generate clients or swagger for them +KUBE_NONSERVER_GROUP_VERSIONS=" + abac.authorization.kubernetes.io/v0 \ + abac.authorization.kubernetes.io/v1beta1 \ + componentconfig/v1alpha1 \ + imagepolicy.k8s.io/v1alpha1\ + admission.k8s.io/v1beta1\ +" + +# This emulates "readlink -f" which is not available on MacOS X. 
+# Test: +# T=/tmp/$$.$RANDOM +# mkdir $T +# touch $T/file +# mkdir $T/dir +# ln -s $T/file $T/linkfile +# ln -s $T/dir $T/linkdir +# function testone() { +# X=$(readlink -f $1 2>&1) +# Y=$(kube::readlinkdashf $1 2>&1) +# if [ "$X" != "$Y" ]; then +# echo readlinkdashf $1: expected "$X", got "$Y" +# fi +# } +# testone / +# testone /tmp +# testone $T +# testone $T/file +# testone $T/dir +# testone $T/linkfile +# testone $T/linkdir +# testone $T/nonexistant +# testone $T/linkdir/file +# testone $T/linkdir/dir +# testone $T/linkdir/linkfile +# testone $T/linkdir/linkdir +function kube::readlinkdashf { + # run in a subshell for simpler 'cd' + ( + if [[ -d "${1}" ]]; then # This also catch symlinks to dirs. + cd "${1}" + pwd -P + else + cd "$(dirname "${1}")" + local f + f=$(basename "${1}") + if [[ -L "${f}" ]]; then + readlink "${f}" + else + echo "$(pwd -P)/${f}" + fi + fi + ) +} + +# This emulates "realpath" which is not available on MacOS X +# Test: +# T=/tmp/$$.$RANDOM +# mkdir $T +# touch $T/file +# mkdir $T/dir +# ln -s $T/file $T/linkfile +# ln -s $T/dir $T/linkdir +# function testone() { +# X=$(realpath $1 2>&1) +# Y=$(kube::realpath $1 2>&1) +# if [ "$X" != "$Y" ]; then +# echo realpath $1: expected "$X", got "$Y" +# fi +# } +# testone / +# testone /tmp +# testone $T +# testone $T/file +# testone $T/dir +# testone $T/linkfile +# testone $T/linkdir +# testone $T/nonexistant +# testone $T/linkdir/file +# testone $T/linkdir/dir +# testone $T/linkdir/linkfile +# testone $T/linkdir/linkdir +kube::realpath() { + if [[ ! -e "${1}" ]]; then + echo "${1}: No such file or directory" >&2 + return 1 + fi + kube::readlinkdashf "${1}" +} diff --git a/vendor/k8s.io/kubernetes/hack/lib/logging.sh b/vendor/k8s.io/kubernetes/hack/lib/logging.sh new file mode 100644 index 000000000..e8de267d2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/logging.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Controls verbosity of the script output and logging. +KUBE_VERBOSE="${KUBE_VERBOSE:-5}" + +# Handler for when we exit automatically on an error. +# Borrowed from https://gist.github.com/ahendrix/7030300 +kube::log::errexit() { + local err="${PIPESTATUS[@]}" + + # If the shell we are in doesn't have errexit set (common in subshells) then + # don't dump stacks. + set +o | grep -qe "-o errexit" || return + + set +o xtrace + local code="${1:-1}" + # Print out the stack trace described by $function_stack + if [ ${#FUNCNAME[@]} -gt 2 ] + then + kube::log::error "Call tree:" + for ((i=1;i<${#FUNCNAME[@]}-1;i++)) + do + kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)" + done + fi + kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. 
'${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1 +} + +kube::log::install_errexit() { + # trap ERR to provide an error handler whenever a command exits nonzero this + # is a more verbose version of set -o errexit + trap 'kube::log::errexit' ERR + + # setting errtrace allows our ERR trap handler to be propagated to functions, + # expansions and subshells + set -o errtrace +} + +# Print out the stack trace +# +# Args: +# $1 The number of stack frames to skip when printing. +kube::log::stack() { + local stack_skip=${1:-0} + stack_skip=$((stack_skip + 1)) + if [[ ${#FUNCNAME[@]} -gt ${stack_skip} ]]; then + echo "Call stack:" >&2 + local i + for ((i=1 ; i <= ${#FUNCNAME[@]} - ${stack_skip} ; i++)) + do + local frame_no=$((i - 1 + stack_skip)) + local source_file=${BASH_SOURCE[${frame_no}]} + local source_lineno=${BASH_LINENO[$((frame_no - 1))]} + local funcname=${FUNCNAME[${frame_no}]} + echo " ${i}: ${source_file}:${source_lineno} ${funcname}(...)" >&2 + done + fi +} + +# Log an error and exit. +# Args: +# $1 Message to log with the error +# $2 The error code to return +# $3 The number of stack frames to skip when printing. +kube::log::error_exit() { + local message="${1:-}" + local code="${2:-1}" + local stack_skip="${3:-0}" + stack_skip=$((stack_skip + 1)) + + if [[ ${KUBE_VERBOSE} -ge 4 ]]; then + local source_file=${BASH_SOURCE[${stack_skip}]} + local source_line=${BASH_LINENO[$((stack_skip - 1))]} + echo "!!! Error in ${source_file}:${source_line}" >&2 + [[ -z ${1-} ]] || { + echo " ${1}" >&2 + } + + kube::log::stack ${stack_skip} + + echo "Exiting with status ${code}" >&2 + fi + + exit "${code}" +} + +# Log an error but keep going. Don't dump the stack or exit. +kube::log::error() { + timestamp=$(date +"[%m%d %H:%M:%S]") + echo "!!! ${timestamp} ${1-}" >&2 + shift + for message; do + echo " ${message}" >&2 + done +} + +# Print an usage message to stderr. The arguments are printed directly. +kube::log::usage() { + echo >&2 + local message + for message; do + echo "${message}" >&2 + done + echo >&2 +} + +kube::log::usage_from_stdin() { + local messages=() + while read -r line; do + messages+=("${line}") + done + + kube::log::usage "${messages[@]}" +} + +# Print out some info that isn't a top level status line +kube::log::info() { + local V="${V:-0}" + if [[ ${KUBE_VERBOSE} < ${V} ]]; then + return + fi + + for message; do + echo "${message}" + done +} + +# Just like kube::log::info, but no \n, so you can make a progress bar +kube::log::progress() { + for message; do + echo -e -n "${message}" + done +} + +kube::log::info_from_stdin() { + local messages=() + while read -r line; do + messages+=("${line}") + done + + kube::log::info "${messages[@]}" +} + +# Print a status line. Formatted to show up in a stream of output. +kube::log::status() { + local V="${V:-0}" + if [[ ${KUBE_VERBOSE} < ${V} ]]; then + return + fi + + timestamp=$(date +"[%m%d %H:%M:%S]") + echo "+++ ${timestamp} ${1}" + shift + for message; do + echo " ${message}" + done +} diff --git a/vendor/k8s.io/kubernetes/hack/lib/util.sh b/vendor/k8s.io/kubernetes/hack/lib/util.sh new file mode 100755 index 000000000..31ce9fc4a --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/util.sh @@ -0,0 +1,839 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function kube::util::sourced_variable { + # Call this function to tell shellcheck that a variable is supposed to + # be used from other calling context. This helps quiet an "unused + # variable" warning from shellcheck and also document your code. + true +} + +kube::util::sortable_date() { + date "+%Y%m%d-%H%M%S" +} + +# arguments: target, item1, item2, item3, ... +# returns 0 if target is in the given items, 1 otherwise. +kube::util::array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +kube::util::wait_for_url() { + local url=$1 + local prefix=${2:-} + local wait=${3:-1} + local times=${4:-30} + local maxtime=${5:-1} + + command -v curl >/dev/null || { + kube::log::usage "curl must be installed" + exit 1 + } + + local i + for i in $(seq 1 "${times}"); do + local out + if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then + kube::log::status "On try ${i}, ${prefix}: ${out}" + return 0 + fi + sleep "${wait}" + done + kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each" + return 1 +} + +# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG +# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +kube::util::trap_add() { + local trap_add_cmd + trap_add_cmd=$1 + shift + + for trap_add_name in "$@"; do + local existing_cmd + local new_cmd + + # Grab the currently defined trap commands for this trap + existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') + + if [[ -z "${existing_cmd}" ]]; then + new_cmd="${trap_add_cmd}" + else + new_cmd="${trap_add_cmd};${existing_cmd}" + fi + + # Assign the test. Disable the shellcheck warning telling that trap + # commands should be single quoted to avoid evaluating them at this + # point instead evaluating them at run time. The logic of adding new + # commands to a single trap requires them to be evaluated right away. + # shellcheck disable=SC2064 + trap "${new_cmd}" "${trap_add_name}" + done +} + +# Opposite of kube::util::ensure-temp-dir() +kube::util::cleanup-temp-dir() { + rm -rf "${KUBE_TEMP}" +} + +# Create a temp dir that'll be deleted at the end of this bash session. +# +# Vars set: +# KUBE_TEMP +kube::util::ensure-temp-dir() { + if [[ -z ${KUBE_TEMP-} ]]; then + KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX) + kube::util::trap_add kube::util::cleanup-temp-dir EXIT + fi +} + +kube::util::host_os() { + local host_os + case "$(uname -s)" in + Darwin) + host_os=darwin + ;; + Linux) + host_os=linux + ;; + *) + kube::log::error "Unsupported host OS. Must be Linux or Mac OS X." 
+ exit 1 + ;; + esac + echo "${host_os}" +} + +kube::util::host_arch() { + local host_arch + case "$(uname -m)" in + x86_64*) + host_arch=amd64 + ;; + i?86_64*) + host_arch=amd64 + ;; + amd64*) + host_arch=amd64 + ;; + aarch64*) + host_arch=arm64 + ;; + arm64*) + host_arch=arm64 + ;; + arm*) + host_arch=arm + ;; + i?86*) + host_arch=x86 + ;; + s390x*) + host_arch=s390x + ;; + ppc64le*) + host_arch=ppc64le + ;; + *) + kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." + exit 1 + ;; + esac + echo "${host_arch}" +} + +# This figures out the host platform without relying on golang. We need this as +# we don't want a golang install to be a prerequisite to building yet we need +# this info to figure out where the final binaries are placed. +kube::util::host_platform() { + echo "$(kube::util::host_os)/$(kube::util::host_arch)" +} + +kube::util::find-binary-for-platform() { + local -r lookfor="$1" + local -r platform="$2" + local locations=( + "${KUBE_ROOT}/_output/bin/${lookfor}" + "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}" + "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}" + "${KUBE_ROOT}/platforms/${platform}/${lookfor}" + ) + # Also search for binary in bazel build tree. + # The bazel go rules place some binaries in subtrees like + # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure + # the platform name is matched in the path. + while IFS=$'\n' read -r location; do + locations+=("$location"); + done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \ + \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true) + + # List most recently-updated location. + local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) + echo -n "${bin}" +} + +kube::util::find-binary() { + kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)" +} + +# Run all known doc generators (today gendocs and genman for kubectl) +# $1 is the directory to put those generated documents +kube::util::gen-docs() { + local dest="$1" + + # Find binary + gendocs=$(kube::util::find-binary "gendocs") + genkubedocs=$(kube::util::find-binary "genkubedocs") + genman=$(kube::util::find-binary "genman") + genyaml=$(kube::util::find-binary "genyaml") + genfeddocs=$(kube::util::find-binary "genfeddocs") + + # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at + # least from k/k tree), remove it completely. 
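+  # Illustrative note (hypothetical path): each lookup above resolves to the
+  # most recently built copy of the tool, e.g.
+  #   gendocs="${KUBE_ROOT}/_output/bin/gendocs"
+  # because kube::util::find-binary-for-platform sorts its candidate locations
+  # with `ls -t` and keeps the first match.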
+ kube::util::sourced_variable "${genfeddocs}" + + mkdir -p "${dest}/docs/user-guide/kubectl/" + "${gendocs}" "${dest}/docs/user-guide/kubectl/" + mkdir -p "${dest}/docs/admin/" + "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver" + "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager" + "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager" + "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy" + "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler" + "${genkubedocs}" "${dest}/docs/admin/" "kubelet" + "${genkubedocs}" "${dest}/docs/admin/" "kubeadm" + + mkdir -p "${dest}/docs/man/man1/" + "${genman}" "${dest}/docs/man/man1/" "kube-apiserver" + "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager" + "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager" + "${genman}" "${dest}/docs/man/man1/" "kube-proxy" + "${genman}" "${dest}/docs/man/man1/" "kube-scheduler" + "${genman}" "${dest}/docs/man/man1/" "kubelet" + "${genman}" "${dest}/docs/man/man1/" "kubectl" + "${genman}" "${dest}/docs/man/man1/" "kubeadm" + + mkdir -p "${dest}/docs/yaml/kubectl/" + "${genyaml}" "${dest}/docs/yaml/kubectl/" + + # create the list of generated files + pushd "${dest}" > /dev/null || return 1 + touch docs/.generated_docs + find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs + popd > /dev/null || return 1 +} + +# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT +# must be set. +kube::util::remove-gen-docs() { + if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then + # remove all of the old docs; we don't want to check them in. + while read -r file; do + rm "${KUBE_ROOT}/${file}" 2>/dev/null || true + done <"${KUBE_ROOT}/docs/.generated_docs" + # The docs/.generated_docs file lists itself, so we don't need to explicitly + # delete it. + fi +} + +# Takes a group/version and returns the path to its location on disk, sans +# "pkg". E.g.: +# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1 +# * default behavior for only a group: experimental -> apis/experimental +# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned +# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1 +# * Very special handling for when both group and version are "": / -> api +kube::util::group-version-to-pkg-path() { + local group_version="$1" + + while IFS=$'\n' read -r api; do + if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then + echo "vendor/k8s.io/api/${group_version/.*k8s.io/}" + return + fi + done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort) + + # "v1" is the API GroupVersion + if [[ "${group_version}" == "v1" ]]; then + echo "vendor/k8s.io/api/core/v1" + return + fi + + # Special cases first. + # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api, + # moving the results to pkg/apis/api. + case "${group_version}" in + # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API. + __internal) + echo "pkg/apis/core" + ;; + meta/v1) + echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1" + ;; + meta/v1beta1) + echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1" + ;; + *.k8s.io) + echo "pkg/apis/${group_version%.*k8s.io}" + ;; + *.k8s.io/*) + echo "pkg/apis/${group_version/.*k8s.io/}" + ;; + *) + echo "pkg/apis/${group_version%__internal}" + ;; + esac +} + +# Takes a group/version and returns the swagger-spec file name. 
+# default behavior: extensions/v1beta1 -> extensions_v1beta1 +# special case for v1: v1 -> v1 +kube::util::gv-to-swagger-name() { + local group_version="$1" + case "${group_version}" in + v1) + echo "v1" + ;; + *) + echo "${group_version%/*}_${group_version#*/}" + ;; + esac +} + +# Returns the name of the upstream remote repository name for the local git +# repo, e.g. "upstream" or "origin". +kube::util::git_upstream_remote_name() { + git remote -v | grep fetch |\ + grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\ + head -n 1 | awk '{print $1}' +} + +# Ensures the current directory is a git tree for doing things like restoring or +# validating godeps +kube::util::create-fake-git-tree() { + local -r target_dir=${1:-$(pwd)} + + pushd "${target_dir}" >/dev/null || return 1 + git init >/dev/null + git config --local user.email "nobody@k8s.io" + git config --local user.name "$0" + git add . >/dev/null + git commit -q -m "Snapshot" >/dev/null + if (( ${KUBE_VERBOSE:-5} >= 6 )); then + kube::log::status "${target_dir} is now a git tree." + fi + popd >/dev/null || return 1 +} + +# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist +# and are checked out to the referenced rev. +kube::util::godep_restored() { + local -r godeps_json=${1:-Godeps/Godeps.json} + local -r gopath=${2:-${GOPATH%:*}} + + kube::util::require-jq + + local root + local old_rev="" + while read -r path rev; do + rev="${rev//[\'\"]}" # remove quotes which are around revs sometimes + + if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then + # avoid checking the same git/hg root again + continue + fi + + root="${path}" + while [ "${root}" != "." ] && [ ! -d "${gopath}/src/${root}/.git" ] && [ ! -d "${gopath}/src/${root}/.hg" ]; do + root=$(dirname "${root}") + done + if [ "${root}" == "." ]; then + echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2 + return 1 + fi + local head + if [ -d "${gopath}/src/${root}/.git" ]; then + head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)" + else + head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')" + fi + if [ "${head}" != "${rev}" ]; then + echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2 + return 1 + fi + old_rev="${rev}" + done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}") + return 0 +} + +# Exits script if working directory is dirty. If it's run interactively in the terminal +# the user can commit changes in a second terminal. This script will wait. +kube::util::ensure_clean_working_dir() { + while ! git diff HEAD --exit-code &>/dev/null; do + echo -e "\nUnexpected dirty working directory:\n" + if tty -s; then + git status -s + else + git diff -a # be more verbose in log files without tty + exit 1 + fi | sed 's/^/ /' + echo -e "\nCommit your changes in another terminal and then continue here by pressing enter." + read -r + done 1>&2 +} + +# Ensure that the given godep version is installed and in the path. Almost +# nobody should use any version but the default. +# +# Sets: +# KUBE_GODEP: The path to the godep binary +# +kube::util::ensure_godep_version() { + local godep_target_version=${1:-"v80-k8s-r1"} # this version is known to work + + # If KUBE_GODEP is already set, and it's the right version, then use it. 
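+  # Illustrative: the check below is a plain glob match against the output of
+  # `godep version`, so a value such as "godep v80-k8s-r1 (linux/amd64/go1.11)"
+  # (hypothetical output) satisfies
+  #   [[ "${output}" == *"godep v80-k8s-r1"* ]]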
+ if [[ -n "${KUBE_GODEP:-}" && "$(${KUBE_GODEP:?} version 2>/dev/null)" == *"godep ${godep_target_version}"* ]]; then + kube::log::status "Using ${KUBE_GODEP}" + return + fi + + # Otherwise, install forked godep + kube::log::status "Installing godep version ${godep_target_version}" + GOBIN="${KUBE_OUTPUT_BINPATH}" go install k8s.io/kubernetes/third_party/forked/godep + export KUBE_GODEP="${KUBE_OUTPUT_BINPATH}/godep" + kube::log::status "Installed ${KUBE_GODEP}" + + # Verify that the installed godep from fork is what we expect + if [[ "$(${KUBE_GODEP:?} version 2>/dev/null)" != *"godep ${godep_target_version}"* ]]; then + kube::log::error "Expected godep ${godep_target_version} from ${KUBE_GODEP}, got $(${KUBE_GODEP:?} version)" + return 1 + fi +} + +# Ensure that none of the staging repos is checked out in the GOPATH because this +# easily confused godep. +kube::util::ensure_no_staging_repos_in_gopath() { + kube::util::ensure_single_dir_gopath + local error=0 + for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do + if [[ ! -d "${repo_file}" ]]; then + # not a directory or there were no files + continue; + fi + repo="$(basename "${repo_file}")" + if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then + echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2 + error=1 + fi + done + if [ "${error}" = "1" ]; then + exit 1 + fi +} + +# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple. +kube::util::ensure_single_dir_gopath() { + if [[ "${GOPATH}" == *:* ]]; then + echo "GOPATH must consist of a single directory." 1>&2 + exit 1 + fi +} + +# Find the base commit using: +# $PULL_BASE_SHA if set (from Prow) +# current ref from the remote upstream branch +kube::util::base_ref() { + local -r git_branch=$1 + + if [[ -n ${PULL_BASE_SHA:-} ]]; then + echo "${PULL_BASE_SHA}" + return + fi + + full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}" + + # make sure the branch is valid, otherwise the check will pass erroneously. + if ! git describe "${full_branch}" >/dev/null; then + # abort! + exit 1 + fi + + echo "${full_branch}" +} + +# Checks whether there are any files matching pattern $2 changed between the +# current branch and upstream branch named by $1. +# Returns 1 (false) if there are no changes +# 0 (true) if there are changes detected. +kube::util::has_changes() { + local -r git_branch=$1 + local -r pattern=$2 + local -r not_pattern=${3:-totallyimpossiblepattern} + + local base_ref + base_ref=$(kube::util::base_ref "${git_branch}") + echo "Checking for '${pattern}' changes against '${base_ref}'" + + # notice this uses ... to find the first shared ancestor + if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then + return 0 + fi + # also check for pending changes + if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then + echo "Detected '${pattern}' uncommitted changes." + return 0 + fi + echo "No '${pattern}' changes detected." + return 1 +} + +kube::util::download_file() { + local -r url=$1 + local -r destination_file=$2 + + rm "${destination_file}" 2&> /dev/null || true + + for i in $(seq 5) + do + if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then + echo "Downloading ${url} failed. $((5-i)) retries left." + sleep 1 + else + echo "Downloading ${url} succeed" + return 0 + fi + done + return 1 +} + +# Test whether openssl is installed. 
+# Sets: +# OPENSSL_BIN: The path to the openssl binary to use +function kube::util::test_openssl_installed { + if ! openssl version >& /dev/null; then + echo "Failed to run openssl. Please ensure openssl is installed" + exit 1 + fi + + OPENSSL_BIN=$(command -v openssl) +} + +# creates a client CA, args are sudo, dest-dir, ca-id, purpose +# purpose is dropped in after "key encipherment", you usually want +# '"client auth"' +# '"server auth"' +# '"client auth","server auth"' +function kube::util::create_signing_certkey { + local sudo=$1 + local dest_dir=$2 + local id=$3 + local purpose=$4 + # Create client ca + ${sudo} /usr/bin/env bash -e < "${dest_dir}/${id}-ca-config.json" +EOF +} + +# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups... +function kube::util::create_client_certkey { + local sudo=$1 + local dest_dir=$2 + local ca=$3 + local id=$4 + local cn=${5:-$4} + local groups="" + local SEP="" + shift 5 + while [ -n "${1:-}" ]; do + groups+="${SEP}{\"O\":\"$1\"}" + SEP="," + shift 1 + done + ${sudo} /usr/bin/env bash -e < /dev/null +apiVersion: v1 +kind: Config +clusters: + - cluster: + certificate-authority: ${ca_file} + server: https://${api_host}:${api_port}/ + name: local-up-cluster +users: + - user: + token: ${token} + client-certificate: ${dest_dir}/client-${client_id}.crt + client-key: ${dest_dir}/client-${client_id}.key + name: local-up-cluster +contexts: + - context: + cluster: local-up-cluster + user: local-up-cluster + name: local-up-cluster +current-context: local-up-cluster +EOF + + # flatten the kubeconfig files to make them self contained + username=$(whoami) + ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig" + mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig" + chown ${username} "${dest_dir}/${client_id}.kubeconfig" +EOF +} + +# Determines if docker can be run, failures may simply require that the user be added to the docker group. +function kube::util::ensure_docker_daemon_connectivity { + IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}" + # Expand ${DOCKER[@]} only if it's not unset. This is to work around + # Bash 3 issue with unbound variable. + DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"}) + if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then + cat <<'EOF' >&2 +Can't connect to 'docker' daemon. please fix and retry. + +Possible causes: + - Docker Daemon not started + - Linux: confirm via your init system + - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start ` + - macOS w/ Docker for Mac: Check the menu bar and start the Docker application + - DOCKER_HOST hasn't been set or is set incorrectly + - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}` + - macOS w/ docker-machine: run `eval "$(docker-machine env )"` + - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}` + - Other things to check: + - Linux: User isn't in 'docker' group. Add and relogin. + - Something like 'sudo usermod -a -G docker ${USER}' + - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8 +EOF + return 1 + fi +} + +# Wait for background jobs to finish. Return with +# an error status if any of the jobs failed. 
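+#
+# A minimal usage sketch (illustrative; the task names are hypothetical):
+#   build-task-one & build-task-two &
+#   kube::util::wait-for-jobs || kube::log::error "one or more background jobs failed"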
+kube::util::wait-for-jobs() { + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + return ${fail} +} + +# kube::util::join +# Concatenates the list elements with the delimiter passed as first parameter +# +# Ex: kube::util::join , a b c +# -> a,b,c +function kube::util::join { + local IFS="$1" + shift + echo "$*" +} + +# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH +# +# Assumed vars: +# $1 (cfssl directory) (optional) +# +# Sets: +# CFSSL_BIN: The path of the installed cfssl binary +# CFSSLJSON_BIN: The path of the installed cfssljson binary +# +function kube::util::ensure-cfssl { + if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then + CFSSL_BIN=$(command -v cfssl) + CFSSLJSON_BIN=$(command -v cfssljson) + return 0 + fi + + host_arch=$(kube::util::host_arch) + + if [[ "${host_arch}" != "amd64" ]]; then + echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed." + echo "Please install cfssl and cfssljson and verify they are in \$PATH." + echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." + exit 1 + fi + + # Create a temp dir for cfssl if no directory was given + local cfssldir=${1:-} + if [[ -z "${cfssldir}" ]]; then + kube::util::ensure-temp-dir + cfssldir="${KUBE_TEMP}/cfssl" + fi + + mkdir -p "${cfssldir}" + pushd "${cfssldir}" > /dev/null || return 1 + + echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..." + kernel=$(uname -s) + case "${kernel}" in + Linux) + curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 + curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 + ;; + Darwin) + curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64 + curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64 + ;; + *) + echo "Unknown, unsupported platform: ${kernel}." >&2 + echo "Supported platforms: Linux, Darwin." >&2 + exit 2 + esac + + chmod +x cfssl || true + chmod +x cfssljson || true + + CFSSL_BIN="${cfssldir}/cfssl" + CFSSLJSON_BIN="${cfssldir}/cfssljson" + if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then + echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH." + echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." + exit 1 + fi + popd > /dev/null || return 1 +} + +# kube::util::ensure_dockerized +# Confirms that the script is being run inside a kube-build image +# +function kube::util::ensure_dockerized { + if [[ -f /kube-build-image ]]; then + return 0 + else + echo "ERROR: This script is designed to be run inside a kube-build container" + exit 1 + fi +} + +# kube::util::ensure-gnu-sed +# Determines which sed binary is gnu-sed on linux/darwin +# +# Sets: +# SED: The name of the gnu-sed binary +# +function kube::util::ensure-gnu-sed { + if LANG=C sed --help 2>&1 | grep -q GNU; then + SED="sed" + elif command -v gsed &>/dev/null; then + SED="gsed" + else + kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2 + return 1 + fi + kube::util::sourced_variable "${SED}" +} + +# kube::util::check-file-in-alphabetical-order +# Check that the file is in alphabetical order +# +function kube::util::check-file-in-alphabetical-order { + local failure_file="$1" + if ! 
diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then + { + echo + echo "${failure_file} is not in alphabetical order. Please sort it:" + echo + echo " LC_ALL=C sort -o ${failure_file} ${failure_file}" + echo + } >&2 + false + fi +} + +# kube::util::require-jq +# Checks whether jq is installed. +function kube::util::require-jq { + if ! command -v jq &>/dev/null; then + echo "jq not found. Please install." 1>&2 + return 1 + fi +} + +# Some useful colors. +if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_blue="${color_start}1;34m" + declare -r color_cyan="${color_start}1;36m" + declare -r color_norm="${color_start}0m" + + kube::util::sourced_variable "${color_start}" + kube::util::sourced_variable "${color_red}" + kube::util::sourced_variable "${color_yellow}" + kube::util::sourced_variable "${color_green}" + kube::util::sourced_variable "${color_blue}" + kube::util::sourced_variable "${color_cyan}" + kube::util::sourced_variable "${color_norm}" +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/vendor/k8s.io/kubernetes/hack/lib/version.sh b/vendor/k8s.io/kubernetes/hack/lib/version.sh new file mode 100644 index 000000000..5096ac3ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/hack/lib/version.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ----------------------------------------------------------------------------- +# Version management helpers. These functions help to set, save and load the +# following variables: +# +# KUBE_GIT_COMMIT - The git commit id corresponding to this +# source code. +# KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id +# "dirty" indicates source code changes after the git commit id +# "archive" indicates the tree was produced by 'git archive' +# KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version. +# KUBE_GIT_MAJOR - The major part of the version +# KUBE_GIT_MINOR - The minor component of the version + +# Grovels through git to set a set of env variables. +# +# If KUBE_GIT_VERSION_FILE, this function will load from that file instead of +# querying git. +kube::version::get_version_vars() { + if [[ -n ${KUBE_GIT_VERSION_FILE-} ]]; then + kube::version::load_version_vars "${KUBE_GIT_VERSION_FILE}" + return + fi + + # If the kubernetes source was exported through git archive, then + # we likely don't have a git tree, but these magic values may be filled in. + if [[ '$Format:%%$' == "%" ]]; then + KUBE_GIT_COMMIT='$Format:%H$' + KUBE_GIT_TREE_STATE="archive" + # When a 'git archive' is exported, the '$Format:%D$' below will look + # something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: ' + # can be extracted from it. 
+ if [[ '$Format:%D$' =~ tag:\ (v[^ ,]+) ]]; then + KUBE_GIT_VERSION="${BASH_REMATCH[1]}" + fi + fi + + local git=(git --work-tree "${KUBE_ROOT}") + + if [[ -n ${KUBE_GIT_COMMIT-} ]] || KUBE_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then + if [[ -z ${KUBE_GIT_TREE_STATE-} ]]; then + # Check if the tree is dirty. default to dirty + if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then + KUBE_GIT_TREE_STATE="clean" + else + KUBE_GIT_TREE_STATE="dirty" + fi + fi + + # Use git describe to find the version based on tags. + if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then + # This translates the "git describe" to an actual semver.org + # compatible semantic version that looks something like this: + # v1.1.0-alpha.0.6+84c76d1142ea4d + # + # TODO: We continue calling this "git version" because so many + # downstream consumers are expecting it there. + DASHES_IN_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/[^-]//g") + if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then + # We have distance to subversion (v1.1.0-subversion-1-gCommitHash) + KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/") + elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then + # We have distance to base tag (v1.1.0-1-gCommitHash) + KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/") + fi + if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then + # git describe --dirty only considers changes to existing files, but + # that is problematic since new untracked .go files affect the build, + # so use our idea of "dirty" from git status instead. + KUBE_GIT_VERSION+="-dirty" + fi + + + # Try to match the "git describe" output to a regex to try to extract + # the "major" and "minor" versions and whether this is the exact tagged + # version or whether the tree is between two tagged versions. + if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then + KUBE_GIT_MAJOR=${BASH_REMATCH[1]} + KUBE_GIT_MINOR=${BASH_REMATCH[2]} + if [[ -n "${BASH_REMATCH[4]}" ]]; then + KUBE_GIT_MINOR+="+" + fi + fi + + # If KUBE_GIT_VERSION is not a valid Semantic Version, then refuse to build. + if ! [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then + echo "KUBE_GIT_VERSION should be a valid Semantic Version. Current value: ${KUBE_GIT_VERSION}" + echo "Please see more details here: https://semver.org" + exit 1 + fi + fi + fi +} + +# Saves the environment flags to $1 +kube::version::save_version_vars() { + local version_file=${1-} + [[ -n ${version_file} ]] || { + echo "!!! Internal error. No file specified in kube::version::save_version_vars" + return 1 + } + + cat <"${version_file}" +KUBE_GIT_COMMIT='${KUBE_GIT_COMMIT-}' +KUBE_GIT_TREE_STATE='${KUBE_GIT_TREE_STATE-}' +KUBE_GIT_VERSION='${KUBE_GIT_VERSION-}' +KUBE_GIT_MAJOR='${KUBE_GIT_MAJOR-}' +KUBE_GIT_MINOR='${KUBE_GIT_MINOR-}' +EOF +} + +# Loads up the version variables from file $1 +kube::version::load_version_vars() { + local version_file=${1-} + [[ -n ${version_file} ]] || { + echo "!!! Internal error. No file specified in kube::version::load_version_vars" + return 1 + } + + source "${version_file}" +} + +kube::version::ldflag() { + local key=${1} + local val=${2} + + # If you update these, also update the list pkg/version/def.bzl. 
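+  # Illustrative output (assuming KUBE_GO_PACKAGE=k8s.io/kubernetes and a
+  # hypothetical commit id): `kube::version::ldflag "gitCommit" "0123abcd"` emits
+  #   -X 'k8s.io/kubernetes/pkg/version.gitCommit=0123abcd'
+  #   -X 'k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version.gitCommit=0123abcd'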
+ echo "-X '${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}'" + echo "-X '${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.${key}=${val}'" +} + +# Prints the value that needs to be passed to the -ldflags parameter of go build +# in order to set the Kubernetes based on the git tree status. +# IMPORTANT: if you update any of these, also update the lists in +# pkg/version/def.bzl and hack/print-workspace-status.sh. +kube::version::ldflags() { + kube::version::get_version_vars + + local buildDate= + [[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}" + local -a ldflags=($(kube::version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')")) + if [[ -n ${KUBE_GIT_COMMIT-} ]]; then + ldflags+=($(kube::version::ldflag "gitCommit" "${KUBE_GIT_COMMIT}")) + ldflags+=($(kube::version::ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}")) + fi + + if [[ -n ${KUBE_GIT_VERSION-} ]]; then + ldflags+=($(kube::version::ldflag "gitVersion" "${KUBE_GIT_VERSION}")) + fi + + if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then + ldflags+=( + $(kube::version::ldflag "gitMajor" "${KUBE_GIT_MAJOR}") + $(kube::version::ldflag "gitMinor" "${KUBE_GIT_MINOR}") + ) + fi + + # The -ldflags parameter takes a single string, so join the output. + echo "${ldflags[*]-}" +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/BUILD b/vendor/k8s.io/kubernetes/test/kubemark/BUILD new file mode 100644 index 000000000..7e76248ad --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/BUILD @@ -0,0 +1,14 @@ +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/test/kubemark/OWNERS b/vendor/k8s.io/kubernetes/test/kubemark/OWNERS new file mode 100644 index 000000000..329e8801d --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - gmarek + - shyamjvs + - wojtek-t +approvers: + - gmarek + - shyamjvs + - wojtek-t diff --git a/vendor/k8s.io/kubernetes/test/kubemark/cloud-provider-config.sh b/vendor/k8s.io/kubernetes/test/kubemark/cloud-provider-config.sh new file mode 100755 index 000000000..ad7029f23 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/cloud-provider-config.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +CLOUD_PROVIDER="${CLOUD_PROVIDER:-gce}" +CONTAINER_REGISTRY="${CONTAINER_REGISTRY:-gcr.io}" +PROJECT="${PROJECT:-}" +KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-}" +KUBEMARK_IMAGE_MAKE_TARGET="${KUBEMARK_IMAGE_MAKE_TARGET:-gcloudpush}" diff --git a/vendor/k8s.io/kubernetes/test/kubemark/common/util.sh b/vendor/k8s.io/kubernetes/test/kubemark/common/util.sh new file mode 100644 index 000000000..05cdba4c7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/common/util.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Running cmd $RETRIES times in case of failures. +function run-cmd-with-retries { + RETRIES="${RETRIES:-3}" + for attempt in $(seq 1 "${RETRIES}"); do + local ret_val=0 + exec 5>&1 # Duplicate &1 to &5 for use below. + # We don't use 'local' to declare result as then ret_val always gets value 0. + # We use tee to output to &5 (redirected to stdout) while also storing it in the variable. + result=$("$@" 2>&1 | tee >(cat - >&5)) || ret_val="$?" + if [[ "${ret_val:-0}" -ne "0" ]]; then + if [[ $(echo "${result}" | grep -c "already exists") -gt 0 ]]; then + if [[ "${attempt}" == 1 ]]; then + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_red}Failed to $1 $2 ${3:-} as the resource hasn't been deleted from a previous run.${color_norm}" >& 2 + exit 1 + fi + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_yellow}Succeeded to $1 $2 ${3:-} in the previous attempt, but status response wasn't received.${color_norm}" + return 0 + fi + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_yellow}Attempt $attempt failed to $1 $2 ${3:-}. Retrying.${color_norm}" >& 2 + sleep $((attempt * 5)) + else + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_green}Succeeded to $1 $2 ${3:-}.${color_norm}" + return 0 + fi + done + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_red}Failed to $1 $2 ${3:-}.${color_norm}" >& 2 + exit 1 +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh b/vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh new file mode 100755 index 000000000..2ac77b5e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/configure-kubectl.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This script assumes that kubectl binary is present in PATH. +kubectl config set-cluster hollow-cluster --server=http://localhost:8080 --insecure-skip-tls-verify=true +kubectl config set-credentials "$(whoami)" +kubectl config set-context hollow-context --cluster=hollow-cluster --user="$(whoami)" diff --git a/vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh b/vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh new file mode 100644 index 000000000..d4ba2c008 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/gce/util.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. + +source "${KUBE_ROOT}/test/kubemark/common/util.sh" + +# Wrapper for gcloud compute, running it $RETRIES times in case of failures. +# Args: +# $@: all stuff that goes after 'gcloud compute' +function run-gcloud-compute-with-retries { + run-cmd-with-retries gcloud compute "$@" +} + +function authenticate-docker { + echo "Configuring registry authentication" + mkdir -p "${HOME}/.docker" + gcloud beta auth configure-docker -q +} + +# This function isn't too robust to race, but that should be ok given its one-off usage during setup. +function get-or-create-master-ip { + MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') 2>/dev/null || true + + if [[ -z "${MASTER_IP:-}" ]]; then + run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \ + --project "${PROJECT}" \ + --region "${REGION}" -q + + MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') + fi +} + +function create-master-instance-with-resources { + GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}") + # Override the master image project to cos-cloud for COS images staring with `cos` string prefix. 
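+  # For example (illustrative image names): GCI_VERSION="cos-stable-65-10323-64-0"
+  # selects the cos-cloud image project below, while a "gci-*" image keeps the
+  # google-containers default.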
+ DEFAULT_GCI_PROJECT=google-containers + if [[ "${GCI_VERSION}" == "cos"* ]]; then + DEFAULT_GCI_PROJECT=cos-cloud + fi + MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}} + + run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \ + "${GCLOUD_COMMON_ARGS[@]}" \ + --type "${MASTER_DISK_TYPE}" \ + --size "${MASTER_DISK_SIZE}" & + + if [ "${EVENT_PD:-}" == "true" ]; then + run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \ + "${GCLOUD_COMMON_ARGS[@]}" \ + --type "${MASTER_DISK_TYPE}" \ + --size "${MASTER_DISK_SIZE}" & + fi + + get-or-create-master-ip & + + wait + + run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \ + "${GCLOUD_COMMON_ARGS[@]}" \ + --address "${MASTER_IP}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${MASTER_IMAGE_PROJECT}" \ + --image "${MASTER_IMAGE}" \ + --tags "${MASTER_TAG}" \ + --subnet "${SUBNETWORK:-${NETWORK}}" \ + --scopes "storage-ro,logging-write" \ + --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" + + run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \ + "${GCLOUD_COMMON_ARGS[@]}" \ + --metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh" & + + if [ "${EVENT_PD:-}" == "true" ]; then + echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}" + run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \ + "${GCLOUD_COMMON_ARGS[@]}" \ + --disk "${MASTER_NAME}-event-pd" \ + --device-name="master-event-pd" & + fi + + run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \ + --project "${PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "0.0.0.0/0" \ + --target-tags "${MASTER_TAG}" \ + --allow "tcp:443" & + + wait +} + +# Command to be executed is '$1'. +# No. of retries is '$2' (if provided) or 1 (default). +function execute-cmd-on-master-with-retries() { + RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1" +} + +function copy-files() { + run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" "$@" +} + +function delete-master-instance-and-resources { + GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}" --quiet) + + gcloud compute instances delete "${MASTER_NAME}" \ + "${GCLOUD_COMMON_ARGS[@]}" || true + + gcloud compute disks delete "${MASTER_NAME}-pd" \ + "${GCLOUD_COMMON_ARGS[@]}" || true + + gcloud compute disks delete "${MASTER_NAME}-event-pd" \ + "${GCLOUD_COMMON_ARGS[@]}" &> /dev/null || true + + gcloud compute addresses delete "${MASTER_NAME}-ip" \ + --project "${PROJECT}" \ + --region "${REGION}" \ + --quiet || true + + gcloud compute firewall-rules delete "${MASTER_NAME}-https" \ + --project "${PROJECT}" \ + --quiet || true + + if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then + gcloud compute instances delete "${EVENT_STORE_NAME}" \ + "${GCLOUD_COMMON_ARGS[@]}" || true + + gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \ + "${GCLOUD_COMMON_ARGS[@]}" || true + fi +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/iks/shutdown.sh b/vendor/k8s.io/kubernetes/test/kubemark/iks/shutdown.sh new file mode 100644 index 000000000..9b0b2477a --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/iks/shutdown.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script that destroys the clusters used, namespace, and deployment. + +KUBECTL=kubectl +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" +RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" + +# Login to cloud services +complete-login + +# Remove resources created for kubemark +# shellcheck disable=SC2154 # Color defined in sourced script +echo -e "${color_yellow}REMOVING RESOURCES${color_norm}" +spawn-config +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true +rm -rf "${RESOURCE_DIRECTORY}/addons" + "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true + +# Remove clusters, namespaces, and deployments +delete-clusters +if [[ -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" ]] ; then + bash "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" + rm -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" +fi +# shellcheck disable=SC2154 # Color defined in sourced script +echo -e "${color_blue}EXECUTION COMPLETE${color_norm}" +exit 0 diff --git a/vendor/k8s.io/kubernetes/test/kubemark/iks/startup.sh b/vendor/k8s.io/kubernetes/test/kubemark/iks/startup.sh new file mode 100644 index 000000000..c5bcd9f3f --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/iks/startup.sh @@ -0,0 +1,316 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script that creates a Kubemark cluster for IBM cloud. + +KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" +RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" + +# Generate secret and configMap for the hollow-node pods to work, prepare +# manifests of the hollow-node and heapster replication controllers from +# templates, and finally create these resources through kubectl. +function create-kube-hollow-node-resources { + # Create kubeconfig for Kubelet. 
+ KUBELET_KUBECONFIG_CONTENTS="$(cat < "${RESOURCE_DIRECTORY}/addons/heapster.json" + metrics_mem_per_node=4 + metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + metrics_cpu_per_node_numerator=${NUM_NODES} + metrics_cpu_per_node_denominator=2 + metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator)) + sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + eventer_mem_per_node=500 + eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + + # Cluster Autoscaler. + if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then + echo "Setting up Cluster Autoscaler" + KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}" + KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}" + KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-${DESIRED_NODES}}" + NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES} + echo "Setting maximum cluster size to ${NUM_NODES}." + KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}" + sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + fi + + # Kube DNS. + if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then + echo "Setting up kube-dns" + sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml" + fi + + "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark" + set-registry-secrets + + # Create the replication controller for hollow-nodes. + # We allow to override the NUM_REPLICAS when running Cluster Autoscaler. 
+ NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}} + sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml" + proxy_cpu=20 + if [ "${NUM_NODES}" -gt 1000 ]; then + proxy_cpu=50 + fi + proxy_mem_per_node=50 + proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark" + + echo "Created secrets, configMaps, replication-controllers required for hollow-nodes." +} + +# Wait until all hollow-nodes are running or there is a timeout. +function wait-for-hollow-nodes-to-run-or-timeout { + echo -n "Waiting for all hollow-nodes to become Running" + start=$(date +%s) + nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true + ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1)) + until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do + echo -n "." + sleep 1 + now=$(date +%s) + # Fail it if it already took more than 30 minutes. + if [ $((now - start)) -gt 1800 ]; then + echo "" + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}" + # Try listing nodes again - if it fails it means that API server is not responding + if "${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node &> /dev/null; then + echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}." + else + echo "Got error while trying to list hollow-nodes. Probably API server is down." + fi + spawn-config + pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true + running=$(($(echo "${pods}" | grep -c "Running"))) + echo "${running} hollow-nodes are reported as 'Running'" + not_running=$(($(echo "${pods}" | grep -vc "Running") - 1)) + echo "${not_running} hollow-nodes are reported as NOT 'Running'" + echo "${pods}" | grep -v "Running" + exit 1 + fi + nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true + ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1)) + done + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_green} Done!${color_norm}" +} + +############################### Main Function ######################################## +# In order for the cluster autoscalar to function, the template file must be changed so that the ":443" +# is removed. This is because the port is already given with the MASTER_IP. 
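+# As an illustration (the exact template text may differ), the port could be
+# stripped with a substitution such as:
+#   sed -i'' -e 's/{{master_ip}}:443/{{master_ip}}/g' \
+#     "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json"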
+ + +# Create clusters and populate with hollow nodes +complete-login +build-kubemark-image +choose-clusters +generate-values +set-hollow-master +echo "Creating kube hollow node resources" +create-kube-hollow-node-resources +master-config +# shellcheck disable=SC2154 # Color defined in sourced script +echo -e "${color_blue}EXECUTION COMPLETE${color_norm}" + +# Check status of Kubemark +# shellcheck disable=SC2154 # Color defined in sourced script +echo -e "${color_yellow}CHECKING STATUS${color_norm}" +wait-for-hollow-nodes-to-run-or-timeout + +# Celebrate +echo "" +# shellcheck disable=SC2154 # Color defined in sourced script +echo -e "${color_blue}SUCCESS${color_norm}" +clean-repo +exit 0 \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/test/kubemark/iks/util.sh b/vendor/k8s.io/kubernetes/test/kubemark/iks/util.sh new file mode 100644 index 000000000..de91618f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/iks/util.sh @@ -0,0 +1,222 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. + +# Creates a new kube-spawn cluster +function create-clusters { + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_yellow}CHECKING CLUSTERS${color_norm}" + if bx cs clusters | grep -Fq 'deleting'; then + echo -n "Deleting old clusters" + fi + while bx cs clusters | grep -Fq 'deleting' + do + echo -n "." + sleep 10 + done + echo "" + bx cs region-set us-east >/dev/null + bx cs vlans wdc06 >/dev/null + PRIVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "private") | .id' | sed -e "s/\"//g") + PUBVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g") + if ! bx cs clusters | grep -Fq 'kubeSpawnTester'; then + echo "Creating spawning cluster" + bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeSpawnTester + fi + if ! bx cs clusters | grep -Fq 'kubeMasterTester'; then + echo "Creating master cluster" + bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeMasterTester + fi + push-image + if ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_cyan}Warning: new clusters may take up to 60 minutes to be ready${color_norm}" + echo -n "Clusters loading" + fi + while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal' + do + echo -n "." + sleep 5 + done + while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'normal' + do + echo -n "." + sleep 5 + done + echo -e "${color_yellow}CLUSTER CREATION COMPLETE${color_norm}" +} + +# Builds and pushes image to registry +function push-image { + if [[ "${ISBUILD}" = "y" ]]; then + if ! 
bx cr namespaces | grep -Fq "${KUBE_NAMESPACE}"; then + echo "Creating registry namespace" + bx cr namespace-add "${KUBE_NAMESPACE}" + echo "bx cr namespace-rm ${KUBE_NAMESPACE}" >> "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" + fi + docker build -t "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_LOCATION}" + docker tag "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}" + docker push "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}" + echo "Image pushed" + else + KUBEMARK_IMAGE_REGISTRY="brandondr96" + KUBE_NAMESPACE="" + fi +} + +# Allow user to use existing clusters if desired +function choose-clusters { + echo -n -e "Do you want to use custom clusters? [y/N]${color_cyan}>${color_norm} " + read -r USE_EXISTING + if [[ "${USE_EXISTING}" = "y" ]]; then + echo -e "${color_yellow}Enter path for desired hollow-node spawning cluster kubeconfig file:${color_norm}" + read -r CUSTOM_SPAWN_CONFIG + echo -e "${color_yellow}Enter path for desired hollow-node hosting cluster kubeconfig file:${color_norm}" + read -r CUSTOM_MASTER_CONFIG + push-image + elif [[ "${USE_EXISTING}" = "N" ]]; then + create-clusters + else + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_red}Invalid response, please try again:${color_norm}" + choose-clusters + fi +} + +# Ensure secrets are correctly set +function set-registry-secrets { + spawn-config + kubectl get secret bluemix-default-secret-regional -o yaml | sed 's/default/kubemark/g' | kubectl -n kubemark create -f - + kubectl patch serviceaccount -n kubemark default -p '{"imagePullSecrets": [{"name": "bluemix-kubemark-secret"}]}' + kubectl -n kubemark get serviceaccounts default -o json | jq 'del(.metadata.resourceVersion)' | jq 'setpath(["imagePullSecrets"];[{"name":"bluemix-kubemark-secret-regional"}])' | kubectl -n kubemark replace serviceaccount default -f - +} + +# Sets the hollow-node master +# Exported variables: +# MASTER_IP - IP Address of the Kubemark master +function set-hollow-master { + echo -e "${color_yellow}CONFIGURING MASTER${color_norm}" + master-config + MASTER_IP=$(grep server "$KUBECONFIG" | awk -F "/" '{print $3}') + export MASTER_IP +} + +# Set up master cluster environment +# Exported variables: +# KUBECONFIG - Overrides default kube config for the purpose of setting up the Kubemark master components. +function master-config { + if [[ "${USE_EXISTING}" = "y" ]]; then + export KUBECONFIG=${CUSTOM_MASTER_CONFIG} + else + eval "$(bx cs cluster-config kubeMasterTester --admin | grep export)" + fi +} + +# Set up spawn cluster environment +# Exported variables: +# KUBECONFIG - Overrides default kube config for the purpose of setting up the hollow-node cluster. +function spawn-config { + if [[ "${USE_EXISTING}" = "y" ]]; then + export KUBECONFIG=${CUSTOM_SPAWN_CONFIG} + else + eval "$(bx cs cluster-config kubeSpawnTester --admin | grep export)" + fi +} + +# Deletes existing clusters +function delete-clusters { + echo "DELETING CLUSTERS" + bx cs cluster-rm kubeSpawnTester + bx cs cluster-rm kubeMasterTester + while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'deleting' + do + sleep 5 + done + while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'deleting' + do + sleep 5 + done + kubectl delete ns kubemark +} + +# Login to cloud services +function complete-login { + echo -e "${color_yellow}LOGGING INTO CLOUD SERVICES${color_norm}" + echo -n -e "Do you have a federated IBM cloud login? 
[y/N]${color_cyan}>${color_norm} " + read -r ISFED + if [[ "${ISFED}" = "y" ]]; then + bx login --sso -a "${REGISTRY_LOGIN_URL}" + elif [[ "${ISFED}" = "N" ]]; then + bx login -a "${REGISTRY_LOGIN_URL}" + else + echo -e "${color_red}Invalid response, please try again:${color_norm}" + complete-login + fi + bx cr login +} + +# Generate values to fill the hollow-node configuration templates. +# Exported variables: +# KUBECTL - The name or path to the kubernetes client binary. +# TEST_CLUSTER_API_CONTENT_TYPE - Defines the content-type of the requests used by the Kubemark components. +function generate-values { + echo "Generating values" + master-config + KUBECTL=kubectl + export KUBECTL + KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" + RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" + TEST_CLUSTER_API_CONTENT_TYPE="bluemix" #Determine correct usage of this + export TEST_CLUSTER_API_CONTENT_TYPE + CONFIGPATH=${KUBECONFIG%/*} + KUBELET_CERT_BASE64="${KUBELET_CERT_BASE64:-$(base64 "${CONFIGPATH}/admin.pem" | tr -d '\r\n')}" + KUBELET_KEY_BASE64="${KUBELET_KEY_BASE64:-$(base64 "${CONFIGPATH}/admin-key.pem" | tr -d '\r\n')}" + CA_CERT_BASE64="${CA_CERT_BASE64:-$( base64 "$(find "${CONFIGPATH}" -name "*ca*" | head -n 1)" | tr -d '\r\n')}" + +} + +# Build image for kubemark +function build-kubemark-image { + echo -n -e "Do you want to build the kubemark image? [y/N]${color_cyan}>${color_norm} " + read -r ISBUILD + if [[ "${ISBUILD}" = "y" ]]; then + echo -e "${color_yellow}BUILDING IMAGE${color_norm}" + "${KUBE_ROOT}/build/run.sh" make kubemark + cp "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/kubemark" "${KUBEMARK_IMAGE_LOCATION}" + elif [[ "${ISBUILD}" = "N" ]]; then + echo -n "" + else + echo -e "${color_red}Invalid response, please try again:${color_norm}" + build-kubemark-image + fi +} + +# Clean up repository +function clean-repo { + echo -n -e "Do you want to remove build output and binary? [y/N]${color_cyan}>${color_norm} " + read -r ISCLEAN + if [[ "${ISCLEAN}" = "y" ]]; then + echo -e "${color_yellow}CLEANING REPO${color_norm}" + rm -rf "${KUBE_ROOT}/_output" + rm -f "${KUBEMARK_IMAGE_LOCATION}/kubemark" + elif [[ "${ISCLEAN}" = "N" ]]; then + echo -n "" + else + echo -e "${color_red}Invalid response, please try again:${color_norm}" + clean-repo + fi +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/master-log-dump.sh b/vendor/k8s.io/kubernetes/test/kubemark/master-log-dump.sh new file mode 100755 index 000000000..70eae3d6e --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/master-log-dump.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +REPORT_DIR="${1:-_artifacts}" +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
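+# Illustrative usage: `./master-log-dump.sh /tmp/kubemark-artifacts` dumps only
+# the kubemark master's logs into that directory; with no argument the report
+# directory defaults to "_artifacts".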
+
+source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
+source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
+
+export KUBEMARK_MASTER_NAME="${MASTER_NAME}"
+
+echo "Dumping logs for kubemark master: ${KUBEMARK_MASTER_NAME}"
+DUMP_ONLY_MASTER_LOGS=true "${KUBE_ROOT}/cluster/log-dump/log-dump.sh" "${REPORT_DIR}"
diff --git a/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/README.md b/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/README.md
new file mode 100644
index 000000000..3ea0ba792
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/README.md
@@ -0,0 +1,54 @@
+# Kubemark Pre-existing Provider Guide
+
+**Kubemark Master**
+- A set of Kubernetes control plane components running in a VM
+
+**Kubernetes Cluster**
+- A real Kubernetes Cluster that has master and nodes. The hollow-node pods
+  are run in this cluster, but appear as nodes to the Kubemark Master
+
+## Introduction
+
+Every running Kubemark setup looks like the following:
+ 1) A running Kubernetes cluster pointed to by the local kubeconfig
+ 2) A separate VM where the kubemark master is running
+ 3) Some hollow-nodes that run on the Kubernetes Cluster from #1
+ 4) The hollow-nodes are configured to talk with the kubemark master at #2
+
+When using the pre-existing provider, the developer is responsible for creating
+#1 and #2. Therefore, the kubemark scripts will not create any infrastructure
+or start a kubemark master like in other providers. Instead, the existing
+resources provided by the VM at $MASTER_IP will serve as the kubemark master.
+
+## Use Case
+
+The goal of the pre-existing provider is to use the kubemark tools with an
+existing kubemark master. It's meant to provide the developer with
+additional flexibility to customize the cluster infrastructure and still use
+the kubemark setup tools. The pre-existing provider is an **advanced** use
+case that requires the developer to have knowledge of setting up a kubemark
+master.
+
+## Requirements
+
+To use the pre-existing provider, the expectation is that there's a kubemark
+master that is reachable at $MASTER_IP. The machine that the kubemark master is
+on has to be reachable over SSH from the host that's executing the kubemark
+scripts, and the user on that machine has to be 'kubernetes'.
+
+Requirement checklist:
+- Set MASTER_IP to the IP address of the kubemark master
+- The host where you execute the kubemark scripts must be able to ssh to
+  kubernetes@$MASTER_IP
+
+## Example Configuration
+
+_test/kubemark/cloud-provider-config.sh_
+
+```
+CLOUD_PROVIDER="pre-existing"
+KUBEMARK_IMAGE_MAKE_TARGET="push"
+CONTAINER_REGISTRY=docker.io
+PROJECT="rthallisey"
+MASTER_IP="192.168.121.29:6443"
+```
diff --git a/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/util.sh b/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/util.sh
new file mode 100644
index 000000000..3e279af5e
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/kubemark/pre-existing/util.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. + +source "${KUBE_ROOT}/test/kubemark/common/util.sh" + +# Leave the skeleton definition of execute-cmd-on-master-with-retries +# so only the pre-existing provider functions will target this. +function execute-cmd-on-pre-existing-master-with-retries() { + IP_WITHOUT_PORT=$(echo "${MASTER_IP}" | cut -f 1 -d ':') || "${MASTER_IP}" + + RETRIES="${2:-1}" run-cmd-with-retries ssh kubernetes@"${IP_WITHOUT_PORT}" "${1}" +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/heapster.json b/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/heapster.json new file mode 100644 index 000000000..821e7f235 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/heapster.json @@ -0,0 +1,82 @@ +{ + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "heapster-v1.3.0", + "labels": { + "k8s-app": "heapster", + "version": "v1.3.0" + } + }, + "spec": { + "replicas": 1, + "selector": { + "k8s-app": "heapster", + "version": "v1.3.0" + }, + "template": { + "metadata": { + "labels": { + "k8s-app": "heapster", + "version": "v1.3.0" + } + }, + "spec": { + "volumes": [ + { + "name": "kubeconfig-volume", + "secret": { + "secretName": "kubeconfig" + } + } + ], + "containers": [ + { + "name": "heapster", + "image": "k8s.gcr.io/heapster:v1.3.0", + "resources": { + "requests": { + "cpu": "81m", + "memory": "212Mi" + } + }, + "command": [ + "/heapster" + ], + "args": [ + "--source=kubernetes:https://35.238.5.171:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig" + ], + "volumeMounts": [ + { + "name": "kubeconfig-volume", + "mountPath": "/kubeconfig" + } + ] + }, + { + "name": "eventer", + "image": "k8s.gcr.io/heapster:v1.3.0", + "resources": { + "requests": { + "memory": "206300Ki" + } + }, + "command": [ + "/eventer" + ], + "args": [ + "--source=kubernetes:https://35.238.5.171:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig" + ], + "volumeMounts": [ + { + "name": "kubeconfig-volume", + "mountPath": "/kubeconfig" + } + ] + + }] + } + } + } +} + diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/kube_dns.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/kube_dns.yaml new file mode 100644 index 000000000..4c7bf8ce2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/addons/kube_dns.yaml @@ -0,0 +1,188 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kubemark + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kubemark + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kubemark + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-dns + namespace: kubemark + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + 
k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: secret-volume + secret: + secretName: kubeconfig + containers: + - name: kubedns + image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.9 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. + - --dns-port=10053 + - --config-dir=/kube-dns-config + - --kubecfg-file=/etc/secret-volume/dns.kubeconfig + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: secret-volume + mountPath: /etc/secret-volume + - name: dnsmasq + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.9 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --dns-loop-detect + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.9 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. 
+ serviceAccountName: kube-dns diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/cluster-autoscaler_template.json b/vendor/k8s.io/kubernetes/test/kubemark/resources/cluster-autoscaler_template.json new file mode 100644 index 000000000..ffe4a61f8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/cluster-autoscaler_template.json @@ -0,0 +1,94 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "cluster-autoscaler", + "namespace": "kubemark", + "labels": { + "tier": "cluster-management", + "component": "cluster-autoscaler" + } + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "cluster-autoscaler", + "image": "k8s.gcr.io/cluster-autoscaler:v1.0.0", + "command": [ + "./run.sh", + "--kubernetes=https://{{master_ip}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/cluster_autoscaler.kubeconfig", + "--v=4", + "--logtostderr=true", + "--write-status-configmap=true", + "--cloud-provider=kubemark", + "--nodes={{kubemark_autoscaler_min_nodes}}:{{kubemark_autoscaler_max_nodes}}:{{kubemark_autoscaler_mig_name}}" + ], + "env": [ + { + "name": "LOG_OUTPUT", + "value": "/var/log/cluster-autoscaler.log" + } + ], + "resources": { + "requests": { + "cpu": "10m", + "memory": "300Mi" + } + }, + "volumeMounts": [ + {"name": "cloudconfigmount","mountPath": "/etc/gce.conf", "readOnly": true}, + { + "name": "ssl-certs", + "readOnly": true, + "mountPath": "/etc/ssl/certs" + }, + { + "name": "usrsharecacerts", + "readOnly": true, + "mountPath": "/usr/share/ca-certificates" + }, + { + "name": "logdir", + "mountPath": "/var/log", + "readOnly": false + }, + { + "name": "kubeconfig-volume", + "mountPath": "/kubeconfig" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "Always" + } + ], + "volumes": [ + {"name": "cloudconfigmount","hostPath": {"path": "/etc/gce.conf"}}, + { + "name": "ssl-certs", + "hostPath": { + "path": "/etc/ssl/certs" + } + }, + { + "name": "usrsharecacerts", + "hostPath": { + "path": "/usr/share/ca-certificates" + } + }, + { + "name": "logdir", + "hostPath": { + "path": "/var/log" + } + }, + { + "name": "kubeconfig-volume", + "secret": { + "secretName": "kubeconfig" + } + } + ], + "restartPolicy": "Always" + } +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/heapster_template.json b/vendor/k8s.io/kubernetes/test/kubemark/resources/heapster_template.json new file mode 100644 index 000000000..5a9bb512e --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/heapster_template.json @@ -0,0 +1,82 @@ +{ + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "heapster-v1.3.0", + "labels": { + "k8s-app": "heapster", + "version": "v1.3.0" + } + }, + "spec": { + "replicas": 1, + "selector": { + "k8s-app": "heapster", + "version": "v1.3.0" + }, + "template": { + "metadata": { + "labels": { + "k8s-app": "heapster", + "version": "v1.3.0" + } + }, + "spec": { + "volumes": [ + { + "name": "kubeconfig-volume", + "secret": { + "secretName": "kubeconfig" + } + } + ], + "containers": [ + { + "name": "heapster", + "image": "k8s.gcr.io/heapster:v1.3.0", + "resources": { + "requests": { + "cpu": "{{METRICS_CPU}}m", + "memory": "{{METRICS_MEM}}Mi" + } + }, + "command": [ + "/heapster" + ], + "args": [ + "--source=kubernetes:https://{{MASTER_IP}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig" + ], + "volumeMounts": [ + { + "name": "kubeconfig-volume", + "mountPath": "/kubeconfig" + } + ] + }, + { + "name": "eventer", + 
"image": "k8s.gcr.io/heapster:v1.3.0", + "resources": { + "requests": { + "memory": "{{EVENTER_MEM}}Ki" + } + }, + "command": [ + "/eventer" + ], + "args": [ + "--source=kubernetes:https://{{MASTER_IP}}:443?inClusterConfig=0&useServiceAccount=0&auth=/kubeconfig/heapster.kubeconfig" + ], + "volumeMounts": [ + { + "name": "kubeconfig-volume", + "mountPath": "/kubeconfig" + } + ] + + }] + } + } + } +} + diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node.yaml new file mode 100644 index 000000000..c293f92fb --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node.yaml @@ -0,0 +1,132 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: hollow-node + labels: + name: hollow-node + +spec: + replicas: 3 + selector: + name: hollow-node + template: + metadata: + labels: + name: hollow-node + + spec: + initContainers: + - name: init-inotify-limit + image: busybox + command: ['sysctl', '-w', 'fs.inotify.max_user_instances=1000'] + securityContext: + privileged: true + volumes: + - name: kubeconfig-volume + secret: + secretName: kubeconfig + - name: kernelmonitorconfig-volume + configMap: + name: node-configmap + - name: logs-volume + hostPath: + path: /var/log + - name: no-serviceaccount-access-to-real-master + emptyDir: {} + containers: + - name: hollow-kubelet + image: gcr.io/rajadeepankubemark/kubemark:dvs93j + ports: + - containerPort: 4194 + - containerPort: 10250 + - containerPort: 10255 + env: + - name: CONTENT_TYPE + valueFrom: + configMapKeyRef: + name: node-configmap + key: content.type + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /kubemark --morph=kubelet --name=$(NODE_NAME) --v=4 --v=4 --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: logs-volume + mountPath: /var/log + resources: + requests: + cpu: 40m + memory: 100M + securityContext: + privileged: true + - name: hollow-proxy + image: gcr.io/rajadeepankubemark/kubemark:dvs93j + env: + - name: CONTENT_TYPE + valueFrom: + configMapKeyRef: + name: node-configmap + key: content.type + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /kubemark --morph=proxy --name=$(NODE_NAME) --v=4 --v=4 --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: logs-volume + mountPath: /var/log + resources: + requests: + cpu: 20m + memory: 102550Ki + - name: hollow-node-problem-detector + image: k8s.gcr.io/node-problem-detector:v0.4.1 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /node-problem-detector --system-log-monitors=/config/kernel.monitor --apiserver-override="https://35.238.5.171:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig" --alsologtostderr 1>>/var/log/npd-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: kernelmonitorconfig-volume + mountPath: /config + readOnly: true + - name: no-serviceaccount-access-to-real-master + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + readOnly: true + - name: logs-volume + mountPath: /var/log + 
resources: + requests: + cpu: 20m + memory: 20Mi + securityContext: + privileged: true + # Keep the pod running on unreachable node for 15 minutes. + # This time should be sufficient for a VM reboot and should + # avoid recreating a new hollow node. + # See https://github.com/kubernetes/kubernetes/issues/67120 for context. + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 900 diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.yaml new file mode 100644 index 000000000..af04b33e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/hollow-node_template.yaml @@ -0,0 +1,132 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: hollow-node + labels: + name: hollow-node + {{kubemark_mig_config}} +spec: + replicas: {{numreplicas}} + selector: + name: hollow-node + template: + metadata: + labels: + name: hollow-node + {{kubemark_mig_config}} + spec: + initContainers: + - name: init-inotify-limit + image: busybox + command: ['sysctl', '-w', 'fs.inotify.max_user_instances=1000'] + securityContext: + privileged: true + volumes: + - name: kubeconfig-volume + secret: + secretName: kubeconfig + - name: kernelmonitorconfig-volume + configMap: + name: node-configmap + - name: logs-volume + hostPath: + path: /var/log + - name: no-serviceaccount-access-to-real-master + emptyDir: {} + containers: + - name: hollow-kubelet + image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}} + ports: + - containerPort: 4194 + - containerPort: 10250 + - containerPort: 10255 + env: + - name: CONTENT_TYPE + valueFrom: + configMapKeyRef: + name: node-configmap + key: content.type + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /kubemark --morph=kubelet --name=$(NODE_NAME) {{hollow_kubelet_params}} --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubelet-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: logs-volume + mountPath: /var/log + resources: + requests: + cpu: 40m + memory: 100M + securityContext: + privileged: true + - name: hollow-proxy + image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}} + env: + - name: CONTENT_TYPE + valueFrom: + configMapKeyRef: + name: node-configmap + key: content.type + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /kubemark --morph=proxy --name=$(NODE_NAME) {{hollow_proxy_params}} --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr 1>>/var/log/kubeproxy-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: logs-volume + mountPath: /var/log + resources: + requests: + cpu: {{HOLLOW_PROXY_CPU}}m + memory: {{HOLLOW_PROXY_MEM}}Ki + - name: hollow-node-problem-detector + image: k8s.gcr.io/node-problem-detector:v0.4.1 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - /bin/sh + - -c + - /node-problem-detector --system-log-monitors=/config/kernel.monitor --apiserver-override="https://{{master_ip}}:443?inClusterConfig=false&auth=/kubeconfig/npd.kubeconfig" --alsologtostderr 1>>/var/log/npd-$(NODE_NAME).log 2>&1 + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + - name: 
kernelmonitorconfig-volume + mountPath: /config + readOnly: true + - name: no-serviceaccount-access-to-real-master + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + readOnly: true + - name: logs-volume + mountPath: /var/log + resources: + requests: + cpu: 20m + memory: 20Mi + securityContext: + privileged: true + # Keep the pod running on unreachable node for 15 minutes. + # This time should be sufficient for a VM reboot and should + # avoid recreating a new hollow node. + # See https://github.com/kubernetes/kubernetes/issues/67120 for context. + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 900 diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/kernel-monitor.json b/vendor/k8s.io/kubernetes/test/kubemark/resources/kernel-monitor.json new file mode 100644 index 000000000..02c47420d --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/kernel-monitor.json @@ -0,0 +1,20 @@ +{ + "plugin": "filelog", + "pluginConfig": { + "timestamp": "dummy", + "message": "dummy", + "timestampFormat": "dummy" + }, + "logPath": "/dev/null", + "lookback": "10m", + "bufferSize": 10, + "source": "kernel-monitor", + "conditions": [ + { + "type": "KernelDeadlock", + "reason": "KernelHasNoDeadlock", + "message": "kernel has no deadlock" + } + ], + "rules": [] +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/kube_dns_template.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/kube_dns_template.yaml new file mode 100644 index 000000000..02c2a68a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/kube_dns_template.yaml @@ -0,0 +1,188 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kubemark + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kubemark + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kubemark + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-dns + namespace: kubemark + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: secret-volume + secret: + secretName: kubeconfig + containers: + - name: kubedns + image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.9 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain={{dns_domain}}. 
+ - --dns-port=10053 + - --config-dir=/kube-dns-config + - --kubecfg-file=/etc/secret-volume/dns.kubeconfig + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: secret-volume + mountPath: /etc/secret-volume + - name: dnsmasq + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.9 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --dns-loop-detect + - --log-facility=- + - --server=/{{dns_domain}}/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.9 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{dns_domain}},5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{dns_domain}},5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. + serviceAccountName: kube-dns diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-master-env.sh b/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-master-env.sh new file mode 100644 index 000000000..2bf842d4c --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-master-env.sh @@ -0,0 +1,31 @@ +# Generic variables. +INSTANCE_PREFIX="kubernetes" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" +EVENT_PD="false" + +# Etcd related variables. +ETCD_IMAGE="3.3.10-0" +ETCD_VERSION="" + +# Controller-manager related variables. +CONTROLLER_MANAGER_TEST_ARGS=" --v=4 " +ALLOCATE_NODE_CIDRS="true" +CLUSTER_IP_RANGE="10.64.0.0/14" +TERMINATED_POD_GC_THRESHOLD="100" + +# Scheduler related variables. +SCHEDULER_TEST_ARGS=" --v=4 " + +# Apiserver related variables. 
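+# (These values are consumed by start-kubemark-master.sh when it assembles the
+# master pod manifests; for instance, APISERVER_TEST_ARGS below is appended to
+# the kube-apiserver flag list.)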
+APISERVER_TEST_ARGS=" --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1 --v=4 --delete-collection-workers=16" +STORAGE_MEDIA_TYPE="" +STORAGE_BACKEND="etcd3" +ETCD_SERVERS="" +ETCD_SERVERS_OVERRIDES="" +ETCD_COMPACTION_INTERVAL_SEC="" +RUNTIME_CONFIG="" +NUM_NODES="3" +CUSTOM_ADMISSION_PLUGINS="NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +FEATURE_GATES="ExperimentalCriticalPodAnnotation=true" +KUBE_APISERVER_REQUEST_TIMEOUT="300" +ENABLE_APISERVER_ADVANCED_AUDIT="false" diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-ns.json b/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-ns.json new file mode 100644 index 000000000..ea1e887df --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/kubemark-ns.json @@ -0,0 +1,7 @@ +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "kubemark" + } +} diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/README.md b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/README.md new file mode 100644 index 000000000..1df67db62 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/README.md @@ -0,0 +1 @@ +These resources are used to add extra (non-default) bindings to kubemark to match users and groups that are particular to the kubemark environment. These are not standard bootstrap bindings and not standard users they are bound to, and have been adapted from cluster/addons/e2e-rbac-bindings. Tighten/loosen these access rights as required in future. diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/cluster-autoscaler-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/cluster-autoscaler-binding.yaml new file mode 100644 index 000000000..4e1a89c14 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/cluster-autoscaler-binding.yaml @@ -0,0 +1,16 @@ +# This is the role binding for the kubemark cluster autoscaler. +# TODO: Use proper Cluster Autoscaler role (github.com/kubernetes/autoscaler/issues/383) +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler-view-binding + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:cluster-autoscaler diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/heapster-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/heapster-binding.yaml new file mode 100644 index 000000000..f2feb860a --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/heapster-binding.yaml @@ -0,0 +1,15 @@ +# This is the role binding for the kubemark heapster. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: heapster-view-binding + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:heapster +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:heapster diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kube-dns-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kube-dns-binding.yaml new file mode 100644 index 000000000..3f9b0d3df --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kube-dns-binding.yaml @@ -0,0 +1,15 @@ +# This is the role binding for the kubemark kube-dns. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-dns-view-binding + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-dns +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:kube-dns diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubecfg-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubecfg-binding.yaml new file mode 100644 index 000000000..db8ffee35 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubecfg-binding.yaml @@ -0,0 +1,18 @@ +# This is the role binding for the local kubectl, which is +# used for listing hollow-nodes in start-kubemark.sh and +# send resource creation requests, etc in run-e2e-tests.sh. +# Also useful if you manually want to use local kubectl. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubecfg-cluster-admin + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubecfg diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubelet-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubelet-binding.yaml new file mode 100644 index 000000000..2c59627ae --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubelet-binding.yaml @@ -0,0 +1,18 @@ +# The Kubemark environment currently gives all kubelets a single shared credential. 
+# +# TODO: give each kubelet a credential in the system:nodes group with username system:node:, +# to exercise the Node authorizer and admission, then remove this binding +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubelet-node + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/npd-binding.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/npd-binding.yaml new file mode 100644 index 000000000..ced81a55a --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/npd-binding.yaml @@ -0,0 +1,15 @@ +# This is the role binding for the node-problem-detector. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-problem-detector-binding + labels: + kubernetes.io/cluster-service: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-problem-detector +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:node-problem-detector diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd-events.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd-events.yaml new file mode 100644 index 000000000..ea83b1b60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd-events.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Pod +metadata: + name: etcd-server-events + namespace: kube-system +spec: + hostNetwork: true + nodeName: {{instance_prefix}}-master + containers: + - name: etcd-container + image: {{kube_docker_registry}}/etcd:{{etcd_image}} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + command: + - /bin/sh + - -c + - /usr/local/bin/etcd + {{params}} + 1>>/var/log/etcd-events.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /health + port: 4002 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + ports: + - name: serverport + containerPort: 2381 + hostPort: 2381 + protocol: TCP + - name: clientport + containerPort: 4002 + hostPort: 4002 + protocol: TCP + volumeMounts: + - name: varetcd + mountPath: /var/etcd + - name: varlogetcd + mountPath: /var/log/etcd-events.log + volumes: + - name: varetcd + hostPath: + path: /var/etcd/events + - name: varlogetcd + hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd.yaml new file mode 100644 index 000000000..2f7d88a63 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Pod +metadata: + name: etcd-server + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: etcd-container + image: {{kube_docker_registry}}/etcd:{{etcd_image}} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 200m + command: + - /bin/sh + - -c + - /usr/local/bin/etcd + {{params}} + 1>>/var/log/etcd.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /health + port: 2379 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + ports: + - name: serverport + containerPort: 2380 + hostPort: 2380 + protocol: TCP + - 
name: clientport + containerPort: 2379 + hostPort: 2379 + protocol: TCP + volumeMounts: + - name: varetcd + mountPath: /var/etcd + - name: varlogetcd + mountPath: /var/log/etcd.log + volumes: + - name: varetcd + hostPath: + path: /var/etcd + - name: varlogetcd + hostPath: + path: /var/log/etcd.log + type: FileOrCreate diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml new file mode 100644 index 000000000..88c14a585 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-addon-manager + namespace: kube-system + labels: + component: kube-addon-manager +spec: + hostNetwork: true + containers: + - name: kube-addon-manager + image: {{kube_docker_registry}}/kube-addon-manager:v9.0 + command: + - /bin/bash + - -c + - /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1 + resources: + requests: + cpu: 5m + memory: 50Mi + volumeMounts: + - name: addons + mountPath: /etc/kubernetes/ + readOnly: true + - name: varlog + mountPath: /var/log/kube-addon-manager.log + volumes: + - name: addons + hostPath: + path: /etc/kubernetes/ + - name: varlog + hostPath: + path: /var/log/kube-addon-manager.log + type: FileOrCreate diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-apiserver.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-apiserver.yaml new file mode 100644 index 000000000..162d21442 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-apiserver.yaml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: {{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 250m + command: + - /bin/sh + - -c + - /usr/local/bin/kube-apiserver + {{params}} + 1>>/var/log/kube-apiserver.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + ports: + - name: https + containerPort: 443 + hostPort: 443 + protocol: TCP + - name: local + containerPort: 8080 + hostPort: 8080 + protocol: TCP + volumeMounts: +{{audit_policy_config_mount}} + - name: srvkube + mountPath: /etc/srv/kubernetes + readOnly: true + - name: logfile + mountPath: /var/log/kube-apiserver.log + - name: etcssl + mountPath: /etc/ssl + readOnly: true + - name: usrsharecacerts + mountPath: /usr/share/ca-certificates + readOnly: true + - name: srvsshproxy + mountPath: /etc/srv/sshproxy + volumes: +{{audit_policy_config_volume}} + - name: srvkube + hostPath: + path: /etc/srv/kubernetes + - name: logfile + hostPath: + path: /var/log/kube-apiserver.log + type: FileOrCreate + - name: etcssl + hostPath: + path: /etc/ssl + - name: usrsharecacerts + hostPath: + path: /usr/share/ca-certificates + - name: srvsshproxy + hostPath: + path: /etc/srv/sshproxy diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-controller-manager.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-controller-manager.yaml new file mode 100644 index 000000000..c465b9ecc --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-controller-manager.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: Pod +metadata: + name: 
kube-controller-manager + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-controller-manager + image: {{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 200m + command: + - /bin/sh + - -c + - /usr/local/bin/kube-controller-manager + {{params}} + 1>>/var/log/kube-controller-manager.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + volumeMounts: + - name: srvkube + mountPath: /etc/srv/kubernetes + readOnly: true + - name: logfile + mountPath: /var/log/kube-controller-manager.log + - name: etcssl + mountPath: /etc/ssl + readOnly: true + - name: usrsharecacerts + mountPath: /usr/share/ca-certificates + readOnly: true + volumes: + - name: srvkube + hostPath: + path: /etc/srv/kubernetes + - name: logfile + hostPath: + path: /var/log/kube-controller-manager.log + type: FileOrCreate + - name: etcssl + hostPath: + path: /etc/ssl + - name: usrsharecacerts + hostPath: + path: /usr/share/ca-certificates diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-scheduler.yaml b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-scheduler.yaml new file mode 100644 index 000000000..daf172829 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-scheduler.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + namespace: kube-system +spec: + hostNetwork: true + nodeName: {{instance_prefix}}-master + containers: + - name: kube-scheduler + image: {{kube_docker_registry}}/kube-scheduler:{{kube-scheduler_docker_tag}} + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + command: + - /bin/sh + - -c + - /usr/local/bin/kube-scheduler + {{params}} + 1>>/var/log/kube-scheduler.log 2>&1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + volumeMounts: + - name: srvkube + mountPath: /etc/srv/kubernetes + readOnly: true + - name: logfile + mountPath: /var/log/kube-scheduler.log + volumes: + - name: srvkube + hostPath: + path: /etc/srv/kubernetes + - name: logfile + hostPath: + path: /var/log/kube-scheduler.log + type: FileOrCreate diff --git a/vendor/k8s.io/kubernetes/test/kubemark/resources/start-kubemark-master.sh b/vendor/k8s.io/kubernetes/test/kubemark/resources/start-kubemark-master.sh new file mode 100755 index 000000000..68082705b --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/resources/start-kubemark-master.sh @@ -0,0 +1,738 @@ +#!/usr/bin/env bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script that starts kubelet on kubemark-master as a supervisord process +# and then runs the master components as pods using kubelet. + +set -o errexit +set -o nounset +set -o pipefail + +# Define key path variables. 
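The static-pod manifests above expose their health checks on plain-HTTP localhost ports (10252 for the controller manager, 10251 for the scheduler, 8080 for the apiserver's insecure local port). Once the pods are running on the kubemark master they can be probed by hand against the same endpoints the livenessProbes use, for example:

curl -s http://127.0.0.1:8080/healthz    # kube-apiserver (insecure local port)
curl -s http://127.0.0.1:10252/healthz   # kube-controller-manager
curl -s http://127.0.0.1:10251/healthz   # kube-scheduler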
+KUBE_ROOT="/home/kubernetes" +KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin" + +function config-ip-firewall { + echo "Configuring IP firewall rules" + # The GCI image has host firewall which drop most inbound/forwarded packets. + # We need to add rules to accept all TCP/UDP/ICMP packets. + if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then + echo "Add rules to accept all inbound TCP/UDP/ICMP packets" + iptables -A INPUT -w -p TCP -j ACCEPT + iptables -A INPUT -w -p UDP -j ACCEPT + iptables -A INPUT -w -p ICMP -j ACCEPT + fi + if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then + echo "Add rules to accept all forwarded TCP/UDP/ICMP packets" + iptables -A FORWARD -w -p TCP -j ACCEPT + iptables -A FORWARD -w -p UDP -j ACCEPT + iptables -A FORWARD -w -p ICMP -j ACCEPT + fi +} + +function create-dirs { + echo "Creating required directories" + mkdir -p /var/lib/kubelet + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/kubernetes/addons +} + +# Setup working directory for kubelet. +function setup-kubelet-dir { + echo "Making /var/lib/kubelet executable for kubelet" + mount -B /var/lib/kubelet /var/lib/kubelet/ + mount -B -o remount,exec,suid,dev /var/lib/kubelet +} + +# Remove any default etcd config dirs/files. +function delete-default-etcd-configs { + if [[ -d /etc/etcd ]]; then + rm -rf /etc/etcd + fi + if [[ -e /etc/default/etcd ]]; then + rm -f /etc/default/etcd + fi + if [[ -e /etc/systemd/system/etcd.service ]]; then + rm -f /etc/systemd/system/etcd.service + fi + if [[ -e /etc/init.d/etcd ]]; then + rm -f /etc/init.d/etcd + fi +} + +# Compute etcd related variables. +function compute-etcd-variables { + ETCD_IMAGE="${ETCD_IMAGE:-}" + ETCD_QUOTA_BYTES="" + if [ "${ETCD_VERSION:0:2}" == "3." ]; then + # TODO: Set larger quota to see if that helps with + # 'mvcc: database space exceeded' errors. If so, pipe + # though our setup scripts. + ETCD_QUOTA_BYTES=" --quota-backend-bytes=4294967296 " + fi +} + +# Formats the given device ($1) if needed and mounts it at given mount point +# ($2). +function safe-format-and-mount() { + device=$1 + mountpoint=$2 + + # Format only if the disk is not already formatted. + if ! tune2fs -l "${device}" ; then + echo "Formatting '${device}'" + mkfs.ext4 -F "${device}" + fi + + echo "Mounting '${device}' at '${mountpoint}'" + mount -o discard,defaults "${device}" "${mountpoint}" +} + +# Finds a PD device with name '$1' attached to the master. +function find-attached-pd() { + local -r pd_name=$1 + if [[ ! -e /dev/disk/by-id/${pd_name} ]]; then + echo "" + fi + device_info=$(ls -l "/dev/disk/by-id/${pd_name}") + relative_path=${device_info##* } + echo "/dev/disk/by-id/${relative_path}" +} + +# Mounts a persistent disk (formatting if needed) to store the persistent data +# on the master. safe-format-and-mount only formats an unformatted disk, and +# mkdir -p will leave a directory be if it already exists. +function mount-pd() { + local -r pd_name=$1 + local -r mount_point=$2 + + if [[ -z "${find-attached-pd ${pd_name}}" ]]; then + echo "Can't find ${pd_name}. Skipping mount." + return + fi + + local -r pd_path="/dev/disk/by-id/${pd_name}" + echo "Mounting PD '${pd_path}' at '${mount_point}'" + # Format and mount the disk, create directories on it for all of the master's + # persistent data, and link them to where they're used. 
+ mkdir -p "${mount_point}" + safe-format-and-mount "${pd_path}" "${mount_point}" + echo "Mounted PD '${pd_path}' at '${mount_point}'" + + # NOTE: These locations on the PD store persistent data, so to maintain + # upgradeability, these locations should not change. If they do, take care + # to maintain a migration path from these locations to whatever new + # locations. +} + +# Create kubeconfig for controller-manager's service account authentication. +function create-kubecontrollermanager-kubeconfig { + echo "Creating kube-controller-manager kubeconfig file" + mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager" + cat <"${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig" +apiVersion: v1 +kind: Config +users: +- name: kube-controller-manager + user: + token: ${KUBE_CONTROLLER_MANAGER_TOKEN} +clusters: +- name: local + cluster: + insecure-skip-tls-verify: true + server: https://localhost:443 +contexts: +- context: + cluster: local + user: kube-controller-manager + name: service-account-context +current-context: service-account-context +EOF +} + +function create-kubescheduler-kubeconfig { + echo "Creating kube-scheduler kubeconfig file" + mkdir -p "${KUBE_ROOT}/k8s_auth_data/kube-scheduler" + cat <"${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig" +apiVersion: v1 +kind: Config +users: +- name: kube-scheduler + user: + token: ${KUBE_SCHEDULER_TOKEN} +clusters: +- name: local + cluster: + insecure-skip-tls-verify: true + server: https://localhost:443 +contexts: +- context: + cluster: local + user: kube-scheduler + name: kube-scheduler +current-context: kube-scheduler +EOF +} + +function assemble-docker-flags { + echo "Assemble docker command line flags" + local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false" + docker_opts+=" --log-level=debug" # Since it's a test cluster + # TODO(shyamjvs): Incorporate network plugin options, etc later. + echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker + echo "DOCKER_NOFILE=65536" >> /etc/default/docker # For setting ulimit -n + systemctl restart docker +} + +# A helper function for loading a docker image. It keeps trying up to 5 times. +# +# $1: Full path of the docker image +function try-load-docker-image { + local -r img=$1 + echo "Try to load docker image file ${img}" + # Temporarily turn off errexit, because we don't want to exit on first failure. + set +e + local -r max_attempts=5 + local -i attempt_num=1 + until timeout 30 docker load -i "${img}"; do + if [[ "${attempt_num}" == "${max_attempts}" ]]; then + echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!" + exit 1 + else + attempt_num=$((attempt_num+1)) + sleep 5 + fi + done + # Re-enable errexit. + set -e +} + +# Loads kube-system docker images. It is better to do it before starting kubelet, +# as kubelet will restart docker daemon, which may interfere with loading images. +function load-docker-images { + echo "Start loading kube-system docker images" + local -r img_dir="${KUBE_BINDIR}" + try-load-docker-image "${img_dir}/kube-apiserver.tar" + try-load-docker-image "${img_dir}/kube-controller-manager.tar" + try-load-docker-image "${img_dir}/kube-scheduler.tar" +} + +# Computes command line arguments to be passed to kubelet. 
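compute-kubelet-params below, like every compute-*-params helper in this script, assembles one space-separated flag string through conditional appends and prints it for the caller. A minimal sketch of the pattern, with illustrative names (build-flags, EXTRA_TEST_ARGS and COMPONENT_PORT are not part of the patch):

function build-flags {
  local params="${EXTRA_TEST_ARGS:-}"               # caller-supplied overrides
  params+=" --pod-manifest-path=/etc/kubernetes/manifests"
  if [[ -n "${COMPONENT_PORT:-}" ]]; then           # optional flags appended conditionally
    params+=" --port=${COMPONENT_PORT}"
  fi
  echo "${params}"                                  # consumed as flags="$(build-flags)"
}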
+function compute-kubelet-params { + local params="${KUBELET_TEST_ARGS:-}" + params+=" --allow-privileged=true" + params+=" --cgroup-root=/" + params+=" --cloud-provider=gce" + params+=" --pod-manifest-path=/etc/kubernetes/manifests" + if [[ -n "${KUBELET_PORT:-}" ]]; then + params+=" --port=${KUBELET_PORT}" + fi + params+=" --enable-debugging-handlers=false" + params+=" --hairpin-mode=none" + echo "${params}" +} + +# Creates the systemd config file for kubelet.service. +function create-kubelet-conf() { + local -r kubelet_bin="$1" + local -r kubelet_env_file="/etc/default/kubelet" + local -r flags=$(compute-kubelet-params) + echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}" + + # Write the systemd service file for kubelet. + cat </etc/systemd/system/kubelet.service +[Unit] +Description=Kubermark kubelet +Requires=network-online.target +After=network-online.target + +[Service] +Restart=always +RestartSec=10 +EnvironmentFile=${kubelet_env_file} +ExecStart=${kubelet_bin} \$KUBELET_OPTS + +[Install] +WantedBy=multi-user.target +EOF +} + +# This function assembles the kubelet systemd service file and starts it using +# systemctl, on the kubemark master. +function start-kubelet { + # Create systemd config. + local -r kubelet_bin="/usr/bin/kubelet" + create-kubelet-conf "${kubelet_bin}" + + # Flush iptables nat table + iptables -t nat -F || true + + # Start the kubelet service. + systemctl start kubelet.service +} + +# Create the log file and set its properties. +# +# $1 is the file to create. +function prepare-log-file { + touch "$1" + chmod 644 "$1" + chown root:root "$1" +} + +# A helper function for copying addon manifests and set dir/files +# permissions. +# +# $1: addon category under /etc/kubernetes +# $2: manifest source dir +function setup-addon-manifests { + local -r src_dir="${KUBE_ROOT}/$2" + local -r dst_dir="/etc/kubernetes/$1/$2" + + if [[ ! -d "${dst_dir}" ]]; then + mkdir -p "${dst_dir}" + fi + + local files + files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml") + if [[ -n "${files}" ]]; then + cp "${src_dir}/"*.yaml "${dst_dir}" + fi + chown -R root:root "${dst_dir}" + chmod 755 "${dst_dir}" + chmod 644 "${dst_dir}"/* +} + +# Write the config for the audit policy. +# Note: This duplicates the function in cluster/gce/gci/configure-helper.sh. +# TODO: Get rid of this function when #53321 is fixed. +function create-master-audit-policy { + local -r path="${1}" + local -r policy="${2:-}" + + if [[ -n "${policy}" ]]; then + echo "${policy}" > "${path}" + return + fi + + # Known api groups + local -r known_apis=' + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io"' + + cat <"${path}" +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port. 
+ # TODO(#46983): Change this to the ingress controller service account. + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: ${known_apis} + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: ${known_apis} + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" +EOF +} + +# Computes command line arguments to be passed to etcd. +function compute-etcd-params { + local params="${ETCD_TEST_ARGS:-}" + params+=" --listen-peer-urls=http://127.0.0.1:2380" + params+=" --advertise-client-urls=http://127.0.0.1:2379" + params+=" --listen-client-urls=http://0.0.0.0:2379" + params+=" --data-dir=/var/etcd/data" + params+=" ${ETCD_QUOTA_BYTES}" + echo "${params}" +} + +# Computes command line arguments to be passed to etcd-events. 
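The flag string above points the main etcd at client port 2379; the events instance configured next uses port 4002 and a separate data directory. Assuming an etcd v3 client is available on the master, both can be health-checked directly:

ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 endpoint health   # main etcd
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:4002 endpoint health   # events etcd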
+function compute-etcd-events-params { + local params="${ETCD_TEST_ARGS:-}" + params+=" --listen-peer-urls=http://127.0.0.1:2381" + params+=" --advertise-client-urls=http://127.0.0.1:4002" + params+=" --listen-client-urls=http://0.0.0.0:4002" + params+=" --data-dir=/var/etcd/data-events" + params+=" ${ETCD_QUOTA_BYTES}" + echo "${params}" +} + +# Computes command line arguments to be passed to apiserver. +function compute-kube-apiserver-params { + local params="--insecure-bind-address=0.0.0.0" + params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}" + if [[ -z "${ETCD_SERVERS:-}" ]]; then + params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#${EVENT_STORE_URL}}" + elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then + params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}" + fi + params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert" + params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key" + params+=" --requestheader-client-ca-file=/etc/srv/kubernetes/aggr_ca.crt" + params+=" --requestheader-allowed-names=aggregator" + params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-" + params+=" --requestheader-group-headers=X-Remote-Group" + params+=" --requestheader-username-headers=X-Remote-User" + params+=" --proxy-client-cert-file=/etc/srv/kubernetes/proxy_client.crt" + params+=" --proxy-client-key-file=/etc/srv/kubernetes/proxy_client.key" + params+=" --enable-aggregator-routing=true" + params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt" + params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv" + params+=" --secure-port=443" + params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv" + params+=" --target-ram-mb=$((NUM_NODES * 60))" + params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + params+=" --admission-control=${CUSTOM_ADMISSION_PLUGINS}" + params+=" --authorization-mode=Node,RBAC" + params+=" --allow-privileged=true" + if [[ -n "${STORAGE_BACKEND:-}" ]]; then + params+=" --storage-backend=${STORAGE_BACKEND}" + fi + if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then + params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}" + fi + if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then + params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s" + fi + if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then + params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}" + fi + if [[ "${NUM_NODES}" -ge 3000 ]]; then + params+=" --max-requests-inflight=3000 --max-mutating-requests-inflight=1000" + elif [[ "${NUM_NODES}" -ge 1000 ]]; then + params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500" + fi + if [[ -n "${RUNTIME_CONFIG:-}" ]]; then + params+=" --runtime-config=${RUNTIME_CONFIG}" + fi + if [[ -n "${FEATURE_GATES:-}" ]]; then + params+=" --feature-gates=${FEATURE_GATES}" + fi + if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then + # Create the audit policy file, and mount it into the apiserver pod. + create-master-audit-policy "${audit_policy_file}" "${ADVANCED_AUDIT_POLICY:-}" + + # The config below matches the one in cluster/gce/gci/configure-helper.sh. + # TODO: Currently supporting just log backend. Support webhook if needed. 
+ params+=" --audit-policy-file=${audit_policy_file}" + params+=" --audit-log-path=/var/log/kube-apiserver-audit.log" + params+=" --audit-log-maxage=0" + params+=" --audit-log-maxbackup=0" + params+=" --audit-log-maxsize=2000000000" + fi + # Append APISERVER_TEST_ARGS to the end, which will allow for + # the above defaults to be overridden. + params+=" ${APISERVER_TEST_ARGS:-}" + echo "${params}" +} + +# Computes command line arguments to be passed to controller-manager. +function compute-kube-controller-manager-params { + local params="${CONTROLLER_MANAGER_TEST_ARGS:-}" + params+=" --use-service-account-credentials" + params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig" + params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key" + params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt" + params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" + params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" + params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}" + echo "${params}" +} + +# Computes command line arguments to be passed to scheduler. +function compute-kube-scheduler-params { + local params="${SCHEDULER_TEST_ARGS:-}" + params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig" + echo "${params}" +} + +# Computes command line arguments to be passed to addon-manager. +function compute-kube-addon-manager-params { + echo "" +} + +# Start a kubernetes master component '$1' which can be any of the following: +# 1. etcd +# 2. etcd-events +# 3. kube-apiserver +# 4. kube-controller-manager +# 5. kube-scheduler +# 6. kube-addon-manager +# +# It prepares the log file, loads the docker tag, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. +# +# Assumed vars: +# DOCKER_REGISTRY +function start-kubemaster-component() { + echo "Start master component $1" + local -r component=$1 + prepare-log-file /var/log/"${component}".log + local -r src_file="${KUBE_ROOT}/${component}.yaml" + local -r params=$("compute-${component}-params") + + # Evaluate variables. 
+ sed -i -e "s@{{params}}@${params}@g" "${src_file}" + sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${src_file}" + sed -i -e "s@{{instance_prefix}}@${INSTANCE_PREFIX}@g" "${src_file}" + if [ "${component:0:4}" == "etcd" ]; then + sed -i -e "s@{{etcd_image}}@${ETCD_IMAGE}@g" "${src_file}" + elif [ "${component}" == "kube-addon-manager" ]; then + setup-addon-manifests "addons" "kubemark-rbac-bindings" + else + local -r component_docker_tag=$(cat "${KUBE_BINDIR}/${component}.docker_tag") + sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}" + if [ "${component}" == "kube-apiserver" ]; then + local audit_policy_config_mount="" + local audit_policy_config_volume="" + if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then + read -r -d '' audit_policy_config_mount << EOF +- name: auditpolicyconfigmount + mountPath: ${audit_policy_file} + readOnly: true +EOF + read -r -d '' audit_policy_config_volume << EOF +- name: auditpolicyconfigmount + hostPath: + path: ${audit_policy_file} + type: FileOrCreate +EOF + fi + sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}" + sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}" + fi + fi + cp "${src_file}" /etc/kubernetes/manifests +} + +############################### Main Function ######################################## +echo "Start to configure master instance for kubemark" + +# Extract files from the server tar and setup master env variables. +cd "${KUBE_ROOT}" +if [[ ! -d "${KUBE_ROOT}/kubernetes" ]]; then + tar xzf kubernetes-server-linux-amd64.tar.gz +fi +source "${KUBE_ROOT}/kubemark-master-env.sh" + +# Setup IP firewall rules, required directory structure and etcd config. +config-ip-firewall +create-dirs +setup-kubelet-dir +delete-default-etcd-configs +compute-etcd-variables + +# Setup authentication tokens and kubeconfigs for kube-controller-manager and kube-scheduler, +# only if their kubeconfigs don't already exist as this script could be running on reboot. +if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-controller-manager/kubeconfig" ]]; then + KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "${KUBE_CONTROLLER_MANAGER_TOKEN},system:kube-controller-manager,uid:system:kube-controller-manager" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv" + create-kubecontrollermanager-kubeconfig +fi +if [[ ! -f "${KUBE_ROOT}/k8s_auth_data/kube-scheduler/kubeconfig" ]]; then + KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "${KUBE_SCHEDULER_TOKEN},system:kube-scheduler,uid:system:kube-scheduler" >> "${KUBE_ROOT}/k8s_auth_data/known_tokens.csv" + create-kubescheduler-kubeconfig +fi + +# Mount master PD for etcd and create symbolic links to it. +{ + main_etcd_mount_point="/mnt/disks/master-pd" + mount-pd "google-master-pd" "${main_etcd_mount_point}" + # Contains all the data stored in etcd. + mkdir -p "${main_etcd_mount_point}/var/etcd" + chmod 700 "${main_etcd_mount_point}/var/etcd" + ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd + mkdir -p /etc/srv + # Setup the dynamically generated apiserver auth certs and keys to pd. + mkdir -p "${main_etcd_mount_point}/srv/kubernetes" + ln -s -f "${main_etcd_mount_point}/srv/kubernetes" /etc/srv/kubernetes + # Copy the files to the PD only if they don't exist (so we do it only the first time). 
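The bearer tokens above are derived from /dev/urandom with a short pipeline (the same recipe reappears in start-kubemark.sh later in this patch): read 128 random bytes, base64-encode them, strip '=', '+' and '/', and keep the first 32 characters. Factored into an assumed helper it reads:

function gen-token {
  dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
}
# e.g. KUBE_CONTROLLER_MANAGER_TOKEN="$(gen-token)"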
+ if [[ "$(ls -A ${main_etcd_mount_point}/srv/kubernetes/)" == "" ]]; then + cp -r "${KUBE_ROOT}"/k8s_auth_data/* "${main_etcd_mount_point}/srv/kubernetes/" + fi + # Directory for kube-apiserver to store SSH key (if necessary). + mkdir -p "${main_etcd_mount_point}/srv/sshproxy" + ln -s -f "${main_etcd_mount_point}/srv/sshproxy" /etc/srv/sshproxy +} + +# Mount master PD for event-etcd (if required) and create symbolic links to it. +{ + EVENT_STORE_IP="${EVENT_STORE_IP:-127.0.0.1}" + EVENT_STORE_URL="${EVENT_STORE_URL:-http://${EVENT_STORE_IP}:4002}" + if [ "${EVENT_PD:-}" == "true" ]; then + event_etcd_mount_point="/mnt/disks/master-event-pd" + mount-pd "google-master-event-pd" "${event_etcd_mount_point}" + # Contains all the data stored in event etcd. + mkdir -p "${event_etcd_mount_point}/var/etcd/events" + chmod 700 "${event_etcd_mount_point}/var/etcd/events" + ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events + fi +} + +# Setup docker flags and load images of the master components. +assemble-docker-flags +DOCKER_REGISTRY="k8s.gcr.io" +load-docker-images + +readonly audit_policy_file="/etc/audit_policy.config" + +# Start kubelet as a supervisord process and master components as pods. +start-kubelet +if [[ -z "${ETCD_SERVERS:-}" ]]; then + start-kubemaster-component "etcd" + if [ "${EVENT_STORE_IP:-}" == "127.0.0.1" ]; then + start-kubemaster-component "etcd-events" + fi +fi +start-kubemaster-component "kube-apiserver" +start-kubemaster-component "kube-controller-manager" +start-kubemaster-component "kube-scheduler" +start-kubemaster-component "kube-addon-manager" + +# Wait till apiserver is working fine or timeout. +echo -n "Waiting for apiserver to be healthy" +start=$(date +%s) +until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do + echo -n "." + sleep 1 + now=$(date +%s) + if [ $((now - start)) -gt 300 ]; then + echo "Timeout!" + exit 1 + fi +done + +echo "Done for the configuration for kubermark master" diff --git a/vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh b/vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh new file mode 100755 index 000000000..c9260cb19 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/run-e2e-tests.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export KUBERNETES_PROVIDER="kubemark" +export KUBE_CONFIG_FILE="config-default.sh" + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
+ +# We need an absolute path to KUBE_ROOT +ABSOLUTE_ROOT=$(readlink -f "${KUBE_ROOT}") + +source "${KUBE_ROOT}/cluster/kubemark/util.sh" + +echo "Kubemark master name: ${MASTER_NAME}" + +detect-master + +export KUBE_MASTER_URL="https://${KUBE_MASTER_IP}" +export KUBECONFIG="${ABSOLUTE_ROOT}/test/kubemark/resources/kubeconfig.kubemark" +export E2E_MIN_STARTUP_PODS=0 + +if [[ -z "$*" ]]; then + ARGS=('--ginkgo.focus=[Feature:Performance]') +else + ARGS=("$@") +fi + +if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then + ARGS+=("--kubemark-external-kubeconfig=${DEFAULT_KUBECONFIG}") +fi + +if [[ -f /.dockerenv ]]; then + # Running inside a dockerized runner. + go run ./hack/e2e.go -- --check-version-skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS[*]}" +else + # Running locally. + for ((i=0; i < ${ARGS[@]}; i++)); do + ARGS[$i]="$(echo "ARGS[$i]" | sed -e 's/\[/\\\[/g' -e 's/\]/\\\]/g' )" + done + "${KUBE_ROOT}/hack/ginkgo-e2e.sh" "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" "${ARGS[@]}" +fi diff --git a/vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh b/vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh new file mode 100644 index 000000000..0620dc049 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/skeleton/util.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script contains the helper functions that each provider hosting +# Kubermark must implement to use test/kubemark/start-kubemark.sh and +# test/kubemark/stop-kubemark.sh scripts. + +# This function should authenticate docker to be able to read/write to +# the right container registry (needed for pushing kubemark image). +function authenticate-docker { + echo "Configuring registry authentication" 1>&2 +} + +# This function should get master IP address (creating one if needed). +# ENV vars that should be defined by the end of this function: +# - MASTER_IP +# +# Recommended for this function to include retrying logic in case of failures. +function get-or-create-master-ip { + echo "MASTER_IP: $MASTER_IP" 1>&2 +} + +# This function should create a machine instance for the master along +# with any/all of the following resources: +# - Attach a PD to the master (optionally 1 more for storing events) +# - A public IP address for the master ($MASTER_IP) +# - A network firewall rule allowing all TCP traffic on port 443 in master +# Note: This step is compulsory in order for kubemark to work +# +# ENV vars that should be defined by the end of this function: +# - MASTER_NAME +# +# Recommended for this function to include retrying logic for the above +# operations in case of failures. +function create-master-instance-with-resources { + echo "MASTER_IP: $MASTER_IP" 1>&2 + echo "MASTER_NAME: $MASTER_NAME" 1>&2 +} + +# This function should execute the command('$1') on the master machine +# (possibly through SSH), retrying in case of failure. 
The allowed number of +# retries would be '$2' (if not provided, default to single try). +function execute-cmd-on-master-with-retries() { + echo "Executing command on the master" 1>&2 +} + +# This function should act as an scp for the kubemark cluster, which copies +# the files given by the first n-1 arguments to the remote location given +# by the n^th argument. +# +# Recommended for this function to include retrying logic in case of failures. +function copy-files() { + echo "Copying files" 1>&2 +} + +# This function should delete the master instance along with all the +# resources that have been allocated inside the function +# 'create-master-instance-with-resources' above. +# +# Recommended for this function to include retrying logic in case of failures. +function delete-master-instance-and-resources { + echo "Deleting master instance and its allocated resources" 1>&2 +} + +# Common colors used throughout the kubemark scripts +if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + # shellcheck disable=SC2034 + declare -r color_red="${color_start}0;31m" + # shellcheck disable=SC2034 + declare -r color_yellow="${color_start}0;33m" + # shellcheck disable=SC2034 + declare -r color_green="${color_start}0;32m" + # shellcheck disable=SC2034 + declare -r color_blue="${color_start}1;34m" + # shellcheck disable=SC2034 + declare -r color_cyan="${color_start}1;36m" + # shellcheck disable=SC2034 + declare -r color_norm="${color_start}0m" +fi \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh b/vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh new file mode 100755 index 000000000..cf685b0f5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/start-kubemark.sh @@ -0,0 +1,504 @@ +#!/usr/bin/env bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script that creates a Kubemark cluster for any given cloud provider. + +set -o errexit +set -o nounset +set -o pipefail + +TMP_ROOT="$(dirname "${BASH_SOURCE[@]}")/../.." +KUBE_ROOT=$(readlink -e "${TMP_ROOT}" 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' "${TMP_ROOT}") + +source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh" +source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh" +source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh" +source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh" + +if [[ -f "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh" ]] ; then + source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/startup.sh" +fi + +source "${KUBE_ROOT}/cluster/kubemark/util.sh" + +# hack/lib/init.sh will ovewrite ETCD_VERSION if this is unset +# what what is default in hack/lib/etcd.sh +# To avoid it, if it is empty, we set it to 'avoid-overwrite' and +# clean it after that. 
+if [ -z "${ETCD_VERSION:-}" ]; then + ETCD_VERSION="avoid-overwrite" +fi +source "${KUBE_ROOT}/hack/lib/init.sh" +if [ "${ETCD_VERSION:-}" == "avoid-overwrite" ]; then + ETCD_VERSION="" +fi + +KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" +RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" + +# Generate a random 6-digit alphanumeric tag for the kubemark image. +# Used to uniquify image builds across different invocations of this script. +KUBEMARK_IMAGE_TAG=$(head /dev/urandom | tr -dc 'a-z0-9' | fold -w 6 | head -n 1) + +# Write all environment variables that we need to pass to the kubemark master, +# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh. +function create-master-environment-file { + cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" </dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + KUBE_DNS_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) + echo "Generated PKI authentication data for kubemark." +} + +# Wait for the master to be reachable for executing commands on it. We do this by +# trying to run the bash noop(:) on the master, with 10 retries. +function wait-for-master-reachability { + execute-cmd-on-master-with-retries ":" 10 + echo "Checked master reachability for remote command execution." +} + +# Write all the relevant certs/keys/tokens to the master. 
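wait-for-master-reachability above leans on execute-cmd-on-master-with-retries, whose contract ($1 is the command, optional $2 the retry count) is declared in skeleton/util.sh and implemented by each provider's test/kubemark/<provider>/util.sh. A hypothetical ssh-based implementation, offered only as a sketch, might look like:

function execute-cmd-on-master-with-retries() {
  local -r cmd="$1"
  local -r attempts="${2:-1}"
  local -i i
  for ((i = 1; i <= attempts; i++)); do
    ssh -o StrictHostKeyChecking=no "kubernetes@${MASTER_NAME}" "${cmd}" && return 0
    sleep 3
  done
  echo "Command failed on master after ${attempts} attempts: ${cmd}" >&2
  return 1
}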
+function write-pki-config-to-master { + PKI_SETUP_CMD="sudo mkdir /home/kubernetes/k8s_auth_data -p && \ + sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/ca.crt\" && \ + sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.cert\" && \ + sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.key\" && \ + sudo bash -c \"echo ${REQUESTHEADER_CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/aggr_ca.crt\" && \ + sudo bash -c \"echo ${PROXY_CLIENT_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.crt\" && \ + sudo bash -c \"echo ${PROXY_CLIENT_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.key\" && \ + sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.crt\" && \ + sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.key\" && \ + sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${CLUSTER_AUTOSCALER_TOKEN},system:cluster-autoscaler,uid:cluster-autoscaler\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo \"${KUBE_DNS_TOKEN},system:kube-dns,uid:kube-dns\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \ + sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /home/kubernetes/k8s_auth_data/basic_auth.csv\"" + execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3 + echo "Wrote PKI certs, keys, tokens and admin password to master." +} + +# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to +# use kubectl locally. +function write-local-kubeconfig { + LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark" + cat > "${LOCAL_KUBECONFIG}" << EOF +apiVersion: v1 +kind: Config +users: +- name: kubecfg + user: + client-certificate-data: "${KUBECFG_CERT_BASE64}" + client-key-data: "${KUBECFG_KEY_BASE64}" + username: admin + password: admin +clusters: +- name: kubemark + cluster: + certificate-authority-data: "${CA_CERT_BASE64}" + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: kubecfg + name: kubemark-context +current-context: kubemark-context +EOF + echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}." +} + +# Copy all the necessary resource files (scripts/configs/manifests) to the master. 
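Each certificate, key and token above travels to the master as a base64 (or plain) environment variable and is materialized with the same idiom. The sudo bash -c wrapper is what makes the output redirection itself run as root; a bare sudo with a plain > redirect would be performed by the invoking user and fail on the root-owned directory:

sudo bash -c "echo ${CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/ca.crt"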
+function copy-resource-files-to-master { + copy-files \ + "${SERVER_BINARY_TAR}" \ + "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \ + "${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \ + "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \ + "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \ + "${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \ + "${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \ + "kubernetes@${MASTER_NAME}":/home/kubernetes/ + echo "Copied server binary, master startup scripts, configs and resource manifests to master." +} + +# Make startup scripts executable and run start-kubemark-master.sh. +function start-master-components { + echo "" + MASTER_STARTUP_CMD="sudo bash /home/kubernetes/start-kubemark-master.sh" + execute-cmd-on-master-with-retries "${MASTER_STARTUP_CMD}" + echo "The master has started and is now live." +} + +# Create a docker image for hollow-node and upload it to the appropriate docker registry. +function create-and-upload-hollow-node-image { + authenticate-docker + KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-${CONTAINER_REGISTRY}/${PROJECT}}" + if [[ "${KUBEMARK_BAZEL_BUILD:-}" =~ ^[yY]$ ]]; then + # Build+push the image through bazel. + touch WORKSPACE # Needed for bazel. + build_cmd=("bazel" "run" "//cluster/images/kubemark:push" "--define" "REGISTRY=${KUBEMARK_IMAGE_REGISTRY}" "--define" "IMAGE_TAG=${KUBEMARK_IMAGE_TAG}") + run-cmd-with-retries "${build_cmd[@]}" + else + # Build+push the image through makefile. + build_cmd=("make" "${KUBEMARK_IMAGE_MAKE_TARGET}") + MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark" + KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)" + if [[ -z "${KUBEMARK_BIN}" ]]; then + echo 'Cannot find cmd/kubemark binary' + exit 1 + fi + echo "Copying kubemark binary to ${MAKE_DIR}" + cp "${KUBEMARK_BIN}" "${MAKE_DIR}" + CURR_DIR=$(pwd) + cd "${MAKE_DIR}" + REGISTRY=${KUBEMARK_IMAGE_REGISTRY} IMAGE_TAG=${KUBEMARK_IMAGE_TAG} run-cmd-with-retries "${build_cmd[@]}" + rm kubemark + cd "$CURR_DIR" + fi + echo "Created and uploaded the kubemark hollow-node image to docker registry." + # Cleanup the kubemark image after the script exits. + if [[ "${CLEANUP_KUBEMARK_IMAGE:-}" == "true" ]]; then + trap delete-kubemark-image EXIT + fi +} + +function delete-kubemark-image { + delete-image "${KUBEMARK_IMAGE_REGISTRY}/kubemark:${KUBEMARK_IMAGE_TAG}" +} + +# Generate secret and configMap for the hollow-node pods to work, prepare +# manifests of the hollow-node and heapster replication controllers from +# templates, and finally create these resources through kubectl. +function create-kube-hollow-node-resources { + # Create kubeconfig for Kubelet. + KUBELET_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: kubelet + user: + client-certificate-data: ${KUBELET_CERT_BASE64} + client-key-data: ${KUBELET_KEY_BASE64} +clusters: +- name: kubemark + cluster: + certificate-authority-data: ${CA_CERT_BASE64} + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: kubelet + name: kubemark-context +current-context: kubemark-context" + + # Create kubeconfig for Kubeproxy. 
+ KUBEPROXY_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: kube-proxy + user: + token: ${KUBE_PROXY_TOKEN} +clusters: +- name: kubemark + cluster: + insecure-skip-tls-verify: true + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: kube-proxy + name: kubemark-context +current-context: kubemark-context" + + # Create kubeconfig for Heapster. + HEAPSTER_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: heapster + user: + token: ${HEAPSTER_TOKEN} +clusters: +- name: kubemark + cluster: + insecure-skip-tls-verify: true + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: heapster + name: kubemark-context +current-context: kubemark-context" + + # Create kubeconfig for Cluster Autoscaler. + CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: cluster-autoscaler + user: + token: ${CLUSTER_AUTOSCALER_TOKEN} +clusters: +- name: kubemark + cluster: + insecure-skip-tls-verify: true + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: cluster-autoscaler + name: kubemark-context +current-context: kubemark-context" + + # Create kubeconfig for NodeProblemDetector. + NPD_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: node-problem-detector + user: + token: ${NODE_PROBLEM_DETECTOR_TOKEN} +clusters: +- name: kubemark + cluster: + insecure-skip-tls-verify: true + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: node-problem-detector + name: kubemark-context +current-context: kubemark-context" + + # Create kubeconfig for Kube DNS. + KUBE_DNS_KUBECONFIG_CONTENTS="apiVersion: v1 +kind: Config +users: +- name: kube-dns + user: + token: ${KUBE_DNS_TOKEN} +clusters: +- name: kubemark + cluster: + insecure-skip-tls-verify: true + server: https://${MASTER_IP} +contexts: +- context: + cluster: kubemark + user: kube-dns + name: kubemark-context +current-context: kubemark-context" + + # Create kubemark namespace. + "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" + + # Create configmap for configuring hollow- kubelet, proxy and npd. + "${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \ + --from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \ + --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json" + + # Create secret for passing kubeconfigs to kubelet, kubeproxy and npd. + "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \ + --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \ + --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \ + --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \ + --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \ + --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \ + --from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}" + + # Create addon pods. + # Heapster. 
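Before the addon and hollow-node pods are created, the namespace, configmap and secret written above can be sanity-checked with the same kubectl wrapper, for example:

"${KUBECTL}" get configmap node-configmap --namespace=kubemark -o yaml
"${KUBECTL}" describe secret kubeconfig --namespace=kubemark   # should list the six embedded kubeconfigs by key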
+ mkdir -p "${RESOURCE_DIRECTORY}/addons" + sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json" + metrics_mem_per_node=4 + metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + metrics_cpu_per_node_numerator=${NUM_NODES} + metrics_cpu_per_node_denominator=2 + metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator)) + sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + eventer_mem_per_node=500 + eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" + + # Cluster Autoscaler. + if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then + echo "Setting up Cluster Autoscaler" + KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}" + KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}" + KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-10}" + NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES} + echo "Setting maximum cluster size to ${NUM_NODES}." + KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}" + sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json" + fi + + # Kube DNS. + if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then + echo "Setting up kube-dns" + sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml" + fi + + "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark" + + # Create the replication controller for hollow-nodes. + # We allow to override the NUM_REPLICAS when running Cluster Autoscaler. 
+ NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}} + sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml" + proxy_cpu=20 + if [ "${NUM_NODES}" -gt 1000 ]; then + proxy_cpu=50 + fi + proxy_mem_per_node=50 + proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES)) + sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark" + + echo "Created secrets, configMaps, replication-controllers required for hollow-nodes." +} + +# Wait until all hollow-nodes are running or there is a timeout. +function wait-for-hollow-nodes-to-run-or-timeout { + echo -n "Waiting for all hollow-nodes to become Running" + start=$(date +%s) + nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true + ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1)) + + until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do + echo -n "." + sleep 1 + now=$(date +%s) + # Fail it if it already took more than 30 minutes. + if [ $((now - start)) -gt 1800 ]; then + echo "" + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}" + # Try listing nodes again - if it fails it means that API server is not responding + if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then + echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}." + else + echo "Got error while trying to list hollow-nodes. Probably API server is down." + fi + pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true + running=$(($(echo "${pods}" | grep -c "Running"))) + echo "${running} hollow-nodes are reported as 'Running'" + not_running=$(($(echo "${pods}" | grep -vc "Running") - 1)) + echo "${not_running} hollow-nodes are reported as NOT 'Running'" + echo "${pods}" | grep -v Running + exit 1 + fi + nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true + ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1)) + done + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_green} Done!${color_norm}" +} + +############################### Main Function ######################################## +detect-project &> /dev/null +find-release-tars + +# We need master IP to generate PKI and kubeconfig for cluster. +get-or-create-master-ip +generate-pki-config +write-local-kubeconfig + +# Setup for master. 
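The readiness loop above subtracts one from the grep count to discount the header line that kubectl prints; an equivalent spot check by hand, using the local kubeconfig written earlier, avoids that offset with --no-headers:

"${KUBECTL}" --kubeconfig="${RESOURCE_DIRECTORY}/kubeconfig.kubemark" get node --no-headers | grep -vc "NotReady"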
+function start-master { + # shellcheck disable=SC2154 # Color defined in sourced script + echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}" + create-master-environment-file + create-master-instance-with-resources + wait-for-master-reachability + write-pki-config-to-master + copy-resource-files-to-master + start-master-components +} +start-master & + +# Setup for hollow-nodes. +function start-hollow-nodes { + echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}" + create-and-upload-hollow-node-image + create-kube-hollow-node-resources + wait-for-hollow-nodes-to-run-or-timeout +} +start-hollow-nodes & + +wait +echo "" +echo "Master IP: ${MASTER_IP}" +echo "Password to kubemark master: ${KUBE_PASSWORD}" +echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}" diff --git a/vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh b/vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh new file mode 100755 index 000000000..75ef920b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/kubemark/stop-kubemark.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# Copyright 2015 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Script that destroys Kubemark cluster and deletes all master resources. + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../.. 
+ +source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh" +source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh" +source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh" +source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh" + +if [[ -f "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/shutdown.sh" ]] ; then + source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/shutdown.sh" +fi + +source "${KUBE_ROOT}/cluster/kubemark/util.sh" + +KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" +KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" +RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" + +detect-project &> /dev/null + +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true +"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true + +rm -rf "${RESOURCE_DIRECTORY}/addons" \ + "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \ + "${RESOURCE_DIRECTORY}/hollow-node.yaml" \ + "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" &> /dev/null || true + +delete-master-instance-and-resources From b79caec5ddcd95e527a83eed1edd7781f5de6d5c Mon Sep 17 00:00:00 2001 From: Rajadeepan Date: Mon, 1 Apr 2019 03:20:02 +0000 Subject: [PATCH 2/4] Add density test case --- Gopkg.lock | 7 +- test/e2e/benchmark.go | 282 ++++++++++++++++++ test/e2e/metric_util.go | 105 +++++++ .../k8s.io/kubernetes/test/e2e/perftype/BUILD | 25 ++ .../kubernetes/test/e2e/perftype/perftype.go | 53 ++++ .../k8s.io/kubernetes/test/utils/image/BUILD | 28 ++ .../k8s.io/kubernetes/test/utils/image/OWNERS | 7 + .../kubernetes/test/utils/image/manifest.go | 149 +++++++++ 8 files changed, 655 insertions(+), 1 deletion(-) create mode 100644 test/e2e/benchmark.go create mode 100644 test/e2e/metric_util.go create mode 100644 vendor/k8s.io/kubernetes/test/e2e/perftype/BUILD create mode 100644 vendor/k8s.io/kubernetes/test/e2e/perftype/perftype.go create mode 100644 vendor/k8s.io/kubernetes/test/utils/image/BUILD create mode 100644 vendor/k8s.io/kubernetes/test/utils/image/OWNERS create mode 100644 vendor/k8s.io/kubernetes/test/utils/image/manifest.go diff --git a/Gopkg.lock b/Gopkg.lock index 702c8f3e8..79c400ffd 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -921,7 +921,7 @@ revision = "ced9eb3070a5f1c548ef46e8dfe2a97c208d9f03" [[projects]] - digest = "1:9606efb9b925b1dc2ca37607033c62f13032d91031fd168bb34847a5aecb9c39" + digest = "1:3bb4371ddb40e33a14307f75e57bad6d5664b82421d89897ca97065f4780d48e" name = "k8s.io/kubernetes" packages = [ "pkg/api/legacyscheme", @@ -1060,6 +1060,8 @@ "pkg/volume/util/recyclerclient", "pkg/volume/util/types", "pkg/volume/util/volumepathhandler", + "test/e2e/perftype", + "test/utils/image", ] pruneopts = "UT" revision = "cff46ab41ff0bb44d8584413b598ad8360ec1def" @@ -1110,6 +1112,7 @@ "k8s.io/apimachinery/pkg/api/meta", "k8s.io/apimachinery/pkg/api/resource", "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", @@ -1162,6 +1165,8 @@ "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/cache", "k8s.io/kubernetes/pkg/scheduler/volumebinder", + "k8s.io/kubernetes/test/e2e/perftype", + "k8s.io/kubernetes/test/utils/image", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/test/e2e/benchmark.go b/test/e2e/benchmark.go new file mode 100644 index 000000000..8a713c4f4 --- /dev/null +++ b/test/e2e/benchmark.go @@ -0,0 +1,282 @@ +/* 
+Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "io/ioutil" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "log" + "os" + "path" + "sort" + "strconv" + "sync" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + con "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + imageutils "k8s.io/kubernetes/test/utils/image" +) + +const ( + MinPodStartupMeasurements = 30 + TotalPodCount = 100 +) + +var _ = Describe("Job E2E Test", func() { + // This test case requires a kubemark cluster so by default added as pending + // To run the test case explicitly ask for it by setting the Focus for the test case + PIt("Schedule Density Job", func() { + context := initKubemarkDensityTestContext() + defer cleanupDensityTestContext(context) + + _, pg := createDensityJob(context, &jobSpec{ + name: "qj-1", + tasks: []taskSpec{ + { + img: "busybox", + req: smallCPU, + min: TotalPodCount, + rep: TotalPodCount, + }, + }, + }) + + err := waitDensityTasksReady(context, pg, TotalPodCount) + checkError(context, err) + + nodeCount := 0 + missingMeasurements := 0 + nodes := getAllWorkerNodes(context) + nodeCount = len(nodes) + + latencyPodsIterations := (MinPodStartupMeasurements + nodeCount - 1) / nodeCount + By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount)) + + createTimes := make(map[string]metav1.Time, 0) + nodeNames := make(map[string]string, 0) + scheduleTimes := make(map[string]metav1.Time, 0) + runTimes := make(map[string]metav1.Time, 0) + watchTimes := make(map[string]metav1.Time, 0) + + var mutex sync.Mutex + checkPod := func(p *v1.Pod) { + mutex.Lock() + defer mutex.Unlock() + defer GinkgoRecover() + + if p.Status.Phase == v1.PodRunning { + if _, found := watchTimes[p.Name]; !found { + watchTimes[p.Name] = metav1.Now() + createTimes[p.Name] = p.CreationTimestamp + nodeNames[p.Name] = p.Spec.NodeName + var startTime metav1.Time + for _, cs := range p.Status.ContainerStatuses { + if cs.State.Running != nil { + if startTime.Before(&cs.State.Running.StartedAt) { + startTime = cs.State.Running.StartedAt + } + } + } + if startTime != metav1.NewTime(time.Time{}) { + runTimes[p.Name] = startTime + } else { + fmt.Println("Pod is reported to be running, but none of its containers is", p.Name) + } + } + } + } + + additionalPodsPrefix := "density-latency-pod" + stopCh := make(chan struct{}) + + nsName := context.namespace + _, controller := cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String() + obj, err := context.kubeclient.CoreV1().Pods(nsName).List(options) + return runtime.Object(obj), err + }, + 
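+				// WatchFunc mirrors ListFunc above, restricting the watch to pods carrying the
+				// latency-pod label so only the additional measurement pods are observed.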
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
+					return context.kubeclient.CoreV1().Pods(nsName).Watch(options)
+				},
+			},
+			&v1.Pod{},
+			0,
+			cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+					p, ok := obj.(*v1.Pod)
+					if !ok {
+						fmt.Println("Failed to cast observed object to *v1.Pod.")
+					}
+					Expect(ok).To(Equal(true))
+					go checkPod(p)
+				},
+				UpdateFunc: func(oldObj, newObj interface{}) {
+					p, ok := newObj.(*v1.Pod)
+					if !ok {
+						fmt.Println("Failed to cast observed object to *v1.Pod.")
+					}
+					Expect(ok).To(Equal(true))
+					go checkPod(p)
+				},
+			},
+		)
+
+		go controller.Run(stopCh)
+
+		for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
+			podIndexOffset := latencyPodsIteration * nodeCount
+			fmt.Printf("Creating %d latency pods in range [%d, %d]\n", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)
+
+			watchTimesLen := len(watchTimes)
+
+			var wg sync.WaitGroup
+			wg.Add(nodeCount)
+
+			cpuRequest := *resource.NewMilliQuantity(1, resource.DecimalSI)
+			memRequest := *resource.NewQuantity(1, resource.DecimalSI)
+
+			rcNameToNsMap := map[string]string{}
+			for i := 1; i <= nodeCount; i++ {
+				name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i)
+				nsName := context.namespace
+				rcNameToNsMap[name] = nsName
+				go createRunningPodFromRC(&wg, context, name, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
+				time.Sleep(200 * time.Millisecond)
+			}
+			wg.Wait()
+
+			By("Waiting for all Pods to be observed by the watch...")
+			waitTimeout := 10 * time.Minute
+			for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
+				if time.Since(start) > waitTimeout {
+					fmt.Println("Timeout reached waiting for all Pods to be observed by the watch.")
+					break
+				}
+			}
+
+			By("Removing additional replication controllers")
+			deleteRC := func(i int) {
+				defer GinkgoRecover()
+				name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
+				deleteReplicationController(context, name)
+			}
+			workqueue.ParallelizeUntil(con.TODO(), 25, nodeCount, deleteRC)
+		}
+		close(stopCh)
+
+		nsName = context.namespace
+		//time.Sleep(1 * time.Minute) // sleep to be added for large number of pods
+		selector := fields.Set{
+			"involvedObject.kind":      "Pod",
+			"involvedObject.namespace": nsName,
+			"source":                   "kube-batch",
+		}.AsSelector().String()
+		options := metav1.ListOptions{FieldSelector: selector}
+		schedEvents, _ := context.kubeclient.CoreV1().Events(nsName).List(options)
+		for k := range createTimes {
+			for _, event := range schedEvents.Items {
+				if event.InvolvedObject.Name == k {
+					scheduleTimes[k] = event.FirstTimestamp
+					break
+				}
+			}
+		}
+
+		scheduleLag := make([]PodLatencyData, 0)
+		startupLag := make([]PodLatencyData, 0)
+		watchLag := make([]PodLatencyData, 0)
+		schedToWatchLag := make([]PodLatencyData, 0)
+		e2eLag := make([]PodLatencyData, 0)
+
+		for name, create := range createTimes {
+			sched, ok := scheduleTimes[name]
+			if !ok {
+				fmt.Println("Failed to find schedule time for", name)
+				missingMeasurements++
+			}
+			run, ok := runTimes[name]
+			if !ok {
+				fmt.Println("Failed to find run time for", name)
+				missingMeasurements++
+			}
+			watch, ok := watchTimes[name]
+			if !ok {
+				fmt.Println("Failed to find watch time for", name)
+				missingMeasurements++
+			}
+			node, ok := nodeNames[name]
+			if !ok {
+				fmt.Println("Failed to find node for", name)
+				missingMeasurements++
+			}
+			scheduleLag = append(scheduleLag, PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
+			startupLag = append(startupLag, PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
+			watchLag = append(watchLag, PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
+			schedToWatchLag = append(schedToWatchLag, PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
+			e2eLag = append(e2eLag, PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
+		}
+
+		sort.Sort(LatencySlice(scheduleLag))
+		sort.Sort(LatencySlice(startupLag))
+		sort.Sort(LatencySlice(watchLag))
+		sort.Sort(LatencySlice(schedToWatchLag))
+		sort.Sort(LatencySlice(e2eLag))
+
+		PrintLatencies(scheduleLag, "worst create-to-schedule latencies")
+		PrintLatencies(startupLag, "worst schedule-to-run latencies")
+		PrintLatencies(watchLag, "worst run-to-watch latencies")
+		PrintLatencies(schedToWatchLag, "worst schedule-to-watch latencies")
+		PrintLatencies(e2eLag, "worst e2e latencies")
+
+		//// Capture latency metrics related to pod-startup.
+		podStartupLatency := &PodStartupLatency{
+			CreateToScheduleLatency: ExtractLatencyMetrics(scheduleLag),
+			ScheduleToRunLatency:    ExtractLatencyMetrics(startupLag),
+			RunToWatchLatency:       ExtractLatencyMetrics(watchLag),
+			ScheduleToWatchLatency:  ExtractLatencyMetrics(schedToWatchLag),
+			E2ELatency:              ExtractLatencyMetrics(e2eLag),
+		}
+
+		fmt.Println(podStartupLatency.PrintJSON())
+
+		dir, err := os.Getwd()
+		if err != nil {
+			log.Fatal(err)
+		}
+		fmt.Println(dir)
+
+		filePath := path.Join(dir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
+		if err := ioutil.WriteFile(filePath, []byte(podStartupLatency.PrintJSON()), 0644); err != nil {
+			fmt.Printf("error writing to %q: %v\n", filePath, err)
+		}
+
+	})
+})
diff --git a/test/e2e/metric_util.go b/test/e2e/metric_util.go
new file mode 100644
index 000000000..5a16a6022
--- /dev/null
+++ b/test/e2e/metric_util.go
@@ -0,0 +1,105 @@
+package e2e
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"k8s.io/kubernetes/test/e2e/perftype"
+	"math"
+	"time"
+)
+
+///// PodLatencyData encapsulates pod startup latency information.
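+// The density test in benchmark.go fills these in from the pod's CreationTimestamp,
+// the kube-batch scheduler event, the first container's StartedAt, and the moment the
+// test's informer first observes the pod as Running.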
+type PodLatencyData struct { + ///// Name of the pod + Name string + ///// Node this pod was running on + Node string + ///// Latency information related to pod startuptime + Latency time.Duration +} + +type LatencySlice []PodLatencyData + +func (a LatencySlice) Len() int { return len(a) } +func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency } + +func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric { + length := len(latencies) + perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency + perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency + perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency + perc100 := latencies[length-1].Latency + return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100} +} + +//// Dashboard metrics +type LatencyMetric struct { + Perc50 time.Duration `json:"Perc50"` + Perc90 time.Duration `json:"Perc90"` + Perc99 time.Duration `json:"Perc99"` + Perc100 time.Duration `json:"Perc100"` +} + +type PodStartupLatency struct { + CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"` + ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"` + RunToWatchLatency LatencyMetric `json:"runToWatchLatency"` + ScheduleToWatchLatency LatencyMetric `json:"scheduleToWatchLatency"` + E2ELatency LatencyMetric `json:"e2eLatency"` +} + +func (l *PodStartupLatency) PrintHumanReadable() string { + return PrettyPrintJSON(l) +} + +func (l *PodStartupLatency) PrintJSON() string { + return PrettyPrintJSON(PodStartupLatencyToPerfData(l)) +} + +func PrettyPrintJSON(metrics interface{}) string { + output := &bytes.Buffer{} + if err := json.NewEncoder(output).Encode(metrics); err != nil { + fmt.Println("Error building encoder:", err) + return "" + } + formatted := &bytes.Buffer{} + if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil { + fmt.Println("Error indenting:", err) + return "" + } + return string(formatted.Bytes()) +} + +//// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData. 
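+//// One DataItem is emitted per phase (create_to_schedule, schedule_to_run, run_to_watch,
+//// schedule_to_watch, pod_startup) so the JSON written by the density test can be read by
+//// perfdash-style tooling.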
+func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData { + perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion} + perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule")) + perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run")) + perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch")) + perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToWatchLatency, "schedule_to_watch")) + perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.E2ELatency, "pod_startup")) + return perfData +} + +func latencyToPerfData(l LatencyMetric, name string) perftype.DataItem { + return perftype.DataItem{ + Data: map[string]float64{ + "Perc50": float64(l.Perc50) / 1000000, //// us -> ms + "Perc90": float64(l.Perc90) / 1000000, + "Perc99": float64(l.Perc99) / 1000000, + "Perc100": float64(l.Perc100) / 1000000, + }, + Unit: "ms", + Labels: map[string]string{ + "Metric": name, + }, + } +} + +func PrintLatencies(latencies []PodLatencyData, header string) { + metrics := ExtractLatencyMetrics(latencies) + fmt.Println("", header, latencies[(len(latencies)*9)/10:]) + fmt.Println("perc50:, perc90:, perc99:", metrics.Perc50, metrics.Perc90, metrics.Perc99) +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/perftype/BUILD b/vendor/k8s.io/kubernetes/test/e2e/perftype/BUILD new file mode 100644 index 000000000..81ea0ff00 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/perftype/BUILD @@ -0,0 +1,25 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["perftype.go"], + importpath = "k8s.io/kubernetes/test/e2e/perftype", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/test/e2e/perftype/perftype.go b/vendor/k8s.io/kubernetes/test/e2e/perftype/perftype.go new file mode 100644 index 000000000..48f819ec2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/perftype/perftype.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package perftype + +// TODO(random-liu): Replace this with prometheus' data model. + +// The following performance data structures are generalized and well-formatted. +// They can be pretty printed in json format and be analyzed by other performance +// analyzing tools, such as Perfdash (k8s.io/contrib/perfdash). + +// DataItem is the data point. +type DataItem struct { + // Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice + // that all data items with the same label conbination should have the same buckets. 
+ Data map[string]float64 `json:"data"` + // Unit is the data unit. Notice that all data items with the same label combination + // should have the same unit. + Unit string `json:"unit"` + // Labels is the labels of the data item. + Labels map[string]string `json:"labels,omitempty"` +} + +// PerfData contains all data items generated in current test. +type PerfData struct { + // Version is the version of the metrics. The metrics consumer could use the version + // to detect metrics version change and decide what version to support. + Version string `json:"version"` + DataItems []DataItem `json:"dataItems"` + // Labels is the labels of the dataset. + Labels map[string]string `json:"labels,omitempty"` +} + +// PerfResultTag is the prefix of generated perfdata. Analyzing tools can find the perf result +// with this tag. +const PerfResultTag = "[Result:Performance]" + +// PerfResultEnd is the end of generated perfdata. Analyzing tools can find the end of the perf +// result with this tag. +const PerfResultEnd = "[Finish:Performance]" diff --git a/vendor/k8s.io/kubernetes/test/utils/image/BUILD b/vendor/k8s.io/kubernetes/test/utils/image/BUILD new file mode 100644 index 000000000..4693827e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/utils/image/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["manifest.go"], + importpath = "k8s.io/kubernetes/test/utils/image", + deps = [ + "//vendor/gopkg.in/yaml.v2:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/test/utils/image/OWNERS b/vendor/k8s.io/kubernetes/test/utils/image/OWNERS new file mode 100644 index 000000000..1e4e74e7d --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/utils/image/OWNERS @@ -0,0 +1,7 @@ +reviewers: + - luxas + - mkumatag + - ixdy +approvers: + - luxas + - ixdy diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go new file mode 100644 index 000000000..2e5015bd3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -0,0 +1,149 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package image + +import ( + "fmt" + "io/ioutil" + "os" + + yaml "gopkg.in/yaml.v2" +) + +// RegistryList holds public and private image registries +type RegistryList struct { + DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"` + E2eRegistry string `yaml:"e2eRegistry"` + GcRegistry string `yaml:"gcRegistry"` + PrivateRegistry string `yaml:"privateRegistry"` + SampleRegistry string `yaml:"sampleRegistry"` +} + +// Config holds an images registry, name, and version +type Config struct { + registry string + name string + version string +} + +// SetRegistry sets an image registry in a Config struct +func (i *Config) SetRegistry(registry string) { + i.registry = registry +} + +// SetName sets an image name in a Config struct +func (i *Config) SetName(name string) { + i.name = name +} + +// SetVersion sets an image version in a Config struct +func (i *Config) SetVersion(version string) { + i.version = version +} + +func initReg() RegistryList { + registry := RegistryList{ + DockerLibraryRegistry: "docker.io/library", + E2eRegistry: "gcr.io/kubernetes-e2e-test-images", + GcRegistry: "k8s.gcr.io", + PrivateRegistry: "gcr.io/k8s-authenticated-test", + SampleRegistry: "gcr.io/google-samples", + } + repoList := os.Getenv("KUBE_TEST_REPO_LIST") + if repoList == "" { + return registry + } + + fileContent, err := ioutil.ReadFile(repoList) + if err != nil { + panic(fmt.Errorf("Error reading '%v' file contents: %v", repoList, err)) + } + + err = yaml.Unmarshal(fileContent, ®istry) + if err != nil { + panic(fmt.Errorf("Error unmarshalling '%v' YAML file: %v", repoList, err)) + } + return registry +} + +var ( + registry = initReg() + dockerLibraryRegistry = registry.DockerLibraryRegistry + e2eRegistry = registry.E2eRegistry + gcRegistry = registry.GcRegistry + // PrivateRegistry is an image repository that requires authentication + PrivateRegistry = registry.PrivateRegistry + sampleRegistry = registry.SampleRegistry +) + +// Preconfigured image configs +var ( + CRDConversionWebhook = Config{e2eRegistry, "crd-conversion-webhook", "1.13rev2"} + AdmissionWebhook = Config{e2eRegistry, "webhook", "1.13v1"} + APIServer = Config{e2eRegistry, "sample-apiserver", "1.10"} + AppArmorLoader = Config{e2eRegistry, "apparmor-loader", "1.0"} + BusyBox = Config{dockerLibraryRegistry, "busybox", "1.29"} + CheckMetadataConcealment = Config{e2eRegistry, "metadata-concealment", "1.1.1"} + CudaVectorAdd = Config{e2eRegistry, "cuda-vector-add", "1.0"} + Dnsutils = Config{e2eRegistry, "dnsutils", "1.1"} + EchoServer = Config{e2eRegistry, "echoserver", "2.2"} + EntrypointTester = Config{e2eRegistry, "entrypoint-tester", "1.0"} + Fakegitserver = Config{e2eRegistry, "fakegitserver", "1.0"} + GBFrontend = Config{sampleRegistry, "gb-frontend", "v6"} + GBRedisSlave = Config{sampleRegistry, "gb-redisslave", "v3"} + Hostexec = Config{e2eRegistry, "hostexec", "1.1"} + IpcUtils = Config{e2eRegistry, "ipc-utils", "1.0"} + Iperf = Config{e2eRegistry, "iperf", "1.0"} + JessieDnsutils = Config{e2eRegistry, "jessie-dnsutils", "1.0"} + Kitten = Config{e2eRegistry, "kitten", "1.0"} + Liveness = Config{e2eRegistry, "liveness", "1.0"} + LogsGenerator = Config{e2eRegistry, "logs-generator", "1.0"} + Mounttest = Config{e2eRegistry, "mounttest", "1.0"} + MounttestUser = Config{e2eRegistry, "mounttest-user", "1.0"} + Nautilus = Config{e2eRegistry, "nautilus", "1.0"} + Net = Config{e2eRegistry, "net", "1.0"} + Netexec = Config{e2eRegistry, "netexec", "1.1"} + Nettest = Config{e2eRegistry, "nettest", "1.0"} + Nginx = 
Config{dockerLibraryRegistry, "nginx", "1.14-alpine"} + NginxNew = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"} + Nonewprivs = Config{e2eRegistry, "nonewprivs", "1.0"} + NoSnatTest = Config{e2eRegistry, "no-snat-test", "1.0"} + NoSnatTestProxy = Config{e2eRegistry, "no-snat-test-proxy", "1.0"} + // Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go + Pause = Config{gcRegistry, "pause", "3.1"} + Porter = Config{e2eRegistry, "porter", "1.0"} + PortForwardTester = Config{e2eRegistry, "port-forward-tester", "1.0"} + Redis = Config{e2eRegistry, "redis", "1.0"} + ResourceConsumer = Config{e2eRegistry, "resource-consumer", "1.4"} + ResourceController = Config{e2eRegistry, "resource-consumer/controller", "1.0"} + ServeHostname = Config{e2eRegistry, "serve-hostname", "1.1"} + TestWebserver = Config{e2eRegistry, "test-webserver", "1.0"} + VolumeNFSServer = Config{e2eRegistry, "volume/nfs", "1.0"} + VolumeISCSIServer = Config{e2eRegistry, "volume/iscsi", "1.0"} + VolumeGlusterServer = Config{e2eRegistry, "volume/gluster", "1.0"} + VolumeRBDServer = Config{e2eRegistry, "volume/rbd", "1.0.1"} +) + +// GetE2EImage returns the fully qualified URI to an image (including version) +func GetE2EImage(image Config) string { + return fmt.Sprintf("%s/%s:%s", image.registry, image.name, image.version) +} + +// GetPauseImageName returns the pause image name with proper version +func GetPauseImageName() string { + return GetE2EImage(Pause) +} From 5cabcbb358c4e79971cb8a6016e933ae4830e6c4 Mon Sep 17 00:00:00 2001 From: TommyLike Date: Sat, 20 Apr 2019 14:08:03 +0800 Subject: [PATCH 3/4] Update testcases --- Makefile | 5 + test/e2e/{ => kube-batch}/benchmark.go | 7 +- test/e2e/{ => kube-batch}/metric_util.go | 0 test/e2e/kube-batch/util.go | 207 +++++++++++++++++++++++ test/kubemark/start-kubemark.sh | 2 +- 5 files changed, 217 insertions(+), 4 deletions(-) rename test/e2e/{ => kube-batch}/benchmark.go (97%) rename test/e2e/{ => kube-batch}/metric_util.go (100%) diff --git a/Makefile b/Makefile index 23c7f1082..d6e41c1e4 100644 --- a/Makefile +++ b/Makefile @@ -74,6 +74,11 @@ e2e-kind: vkctl images coverage: KUBE_COVER=y hack/make-rules/test.sh $(WHAT) $(TESTS) +benchmark: + test/kubemark/start-kubemark.sh + go test ./test/e2e -v -timeout 30m --ginkgo.focus="Feature:Performance" + test/kubemark/stop-kubemark.sh + clean: rm -rf _output/ rm -f kube-batch diff --git a/test/e2e/benchmark.go b/test/e2e/kube-batch/benchmark.go similarity index 97% rename from test/e2e/benchmark.go rename to test/e2e/kube-batch/benchmark.go index 8a713c4f4..fed9b1654 100644 --- a/test/e2e/benchmark.go +++ b/test/e2e/kube-batch/benchmark.go @@ -49,9 +49,10 @@ const ( ) var _ = Describe("Job E2E Test", func() { - // This test case requires a kubemark cluster so by default added as pending - // To run the test case explicitly ask for it by setting the Focus for the test case - PIt("Schedule Density Job", func() { + It("[Feature:Performance] Schedule Density Job", func() { + if getkubemarkConfigPath() == "" { + Skip("Performance test skipped since config file not found") + } context := initKubemarkDensityTestContext() defer cleanupDensityTestContext(context) diff --git a/test/e2e/metric_util.go b/test/e2e/kube-batch/metric_util.go similarity index 100% rename from test/e2e/metric_util.go rename to test/e2e/kube-batch/metric_util.go diff --git a/test/e2e/kube-batch/util.go b/test/e2e/kube-batch/util.go index 49feb7296..ef3d41e55 100644 --- a/test/e2e/kube-batch/util.go +++ 
b/test/e2e/kube-batch/util.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/json" "fmt" + "k8s.io/kubernetes/pkg/util/file" "os" "path/filepath" "text/template" @@ -44,13 +45,20 @@ import ( kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api" + + . "github.com/onsi/ginkgo" + "sync" ) +const currentApiCallMetricsVersion = "v1" + var oneMinute = 1 * time.Minute +var tenMinute = 10 * time.Minute var halfCPU = v1.ResourceList{"cpu": resource.MustParse("500m")} var oneCPU = v1.ResourceList{"cpu": resource.MustParse("1000m")} var twoCPU = v1.ResourceList{"cpu": resource.MustParse("2000m")} +var smallCPU = v1.ResourceList{"cpu": resource.MustParse("2m")} const ( workerPriority = "worker-pri" @@ -816,3 +824,202 @@ func preparePatchBytesforNode(nodeName string, oldNode *v1.Node, newNode *v1.Nod return patchBytes, nil } + +func getkubemarkConfigPath() string { + wd, err := os.Getwd() + if err != nil { + return "" + } + configPath := filepath.Join(wd, "../kubemark/kubeconfig.kubemark") + exist, err := file.FileExists(configPath) + if err != nil || !exist { + return "" + } + return configPath +} + +func initKubemarkDensityTestContext() *context { + cxt := &context{ + namespace: "test", + queues: []string{"q1", "q2"}, + } + + configPath := getkubemarkConfigPath() + Expect(configPath).NotTo(Equal("")) + config, err := clientcmd.BuildConfigFromFlags("", configPath) + checkError(cxt, err) + + cxt.kbclient = kbver.NewForConfigOrDie(config) + cxt.kubeclient = kubernetes.NewForConfigOrDie(config) + + _, err = cxt.kubeclient.CoreV1().Namespaces().Create(&v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: cxt.namespace, + }, + }) + checkError(cxt, err) + + _, err = cxt.kubeclient.SchedulingV1beta1().PriorityClasses().Create(&schedv1.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: masterPriority, + }, + Value: 100, + GlobalDefault: false, + }) + checkError(cxt, err) + + _, err = cxt.kubeclient.SchedulingV1beta1().PriorityClasses().Create(&schedv1.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: workerPriority, + }, + Value: 1, + GlobalDefault: false, + }) + checkError(cxt, err) + + return cxt +} + +func cleanupDensityTestContext(cxt *context) { + foreground := metav1.DeletePropagationForeground + + err := cxt.kubeclient.CoreV1().Namespaces().Delete(cxt.namespace, &metav1.DeleteOptions{ + PropagationPolicy: &foreground, + }) + checkError(cxt, err) + + err = cxt.kubeclient.SchedulingV1beta1().PriorityClasses().Delete(masterPriority, &metav1.DeleteOptions{ + PropagationPolicy: &foreground, + }) + checkError(cxt, err) + + err = cxt.kubeclient.SchedulingV1beta1().PriorityClasses().Delete(workerPriority, &metav1.DeleteOptions{ + PropagationPolicy: &foreground, + }) + checkError(cxt, err) + + // Wait for namespace deleted. 
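+	// Namespace deletion cascades to the density pods and replication controllers,
+	// so poll for up to tenMinute before treating the cleanup as failed.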
+ err = wait.Poll(100*time.Millisecond, tenMinute, namespaceNotExist(cxt)) + checkError(cxt, err) +} + +func createDensityJob(context *context, job *jobSpec) ([]*batchv1.Job, *kbv1.PodGroup) { + var jobs []*batchv1.Job + var podgroup *kbv1.PodGroup + var min int32 + + ns := getNS(context, job) + + pg := &kbv1.PodGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: job.name, + Namespace: ns, + }, + Spec: kbv1.PodGroupSpec{ + MinMember: min, + Queue: job.queue, + PriorityClassName: job.pri, + }, + } + + if job.minMember != nil { + pg.Spec.MinMember = *job.minMember + } + + podgroup, err := context.kbclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Create(pg) + checkError(context, err) + + for i, task := range job.tasks { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", job.name, i), + Namespace: ns, + }, + Spec: batchv1.JobSpec{ + Parallelism: &task.rep, + Completions: &task.rep, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: task.labels, + Annotations: map[string]string{kbv1.GroupNameAnnotationKey: job.name}, + }, + Spec: v1.PodSpec{ + SchedulerName: "kube-batch", + RestartPolicy: v1.RestartPolicyOnFailure, + Containers: createContainers(task.img, task.req, task.hostport), + Affinity: task.affinity, + }, + }, + }, + } + + if len(task.pri) != 0 { + job.Spec.Template.Spec.PriorityClassName = task.pri + } + + job, err := context.kubeclient.BatchV1().Jobs(job.Namespace).Create(job) + checkError(context, err) + jobs = append(jobs, job) + + min = min + task.min + } + + return jobs, podgroup +} + +func waitDensityTasksReady(ctx *context, pg *kbv1.PodGroup, taskNum int) error { + return wait.Poll(100*time.Millisecond, tenMinute, taskPhase(ctx, pg, + []v1.PodPhase{v1.PodRunning, v1.PodSucceeded}, taskNum)) +} + +func createRunningPodFromRC(wg *sync.WaitGroup, context *context, name, image, podType string, cpuRequest, memRequest resource.Quantity) { + defer GinkgoRecover() + defer wg.Done() + labels := map[string]string{ + "type": podType, + "name": name, + } + rc := &v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: func(i int) *int32 { x := int32(i); return &x }(1), + Selector: labels, + Template: &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Annotations: map[string]string{"scheduling.k8s.io/group-name": "qj-1"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: name, + Image: "nginx", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: cpuRequest, + v1.ResourceMemory: memRequest, + }, + }, + }, + }, + DNSPolicy: v1.DNSDefault, + SchedulerName: "kube-batch", + }, + }, + }, + } + + _, err := context.kubeclient.CoreV1().ReplicationControllers(context.namespace).Create(rc) + checkError(context, err) + +} + +func deleteReplicationController(ctx *context, name string) error { + foreground := metav1.DeletePropagationForeground + return ctx.kubeclient.CoreV1().ReplicationControllers(ctx.namespace).Delete(name, &metav1.DeleteOptions{ + PropagationPolicy: &foreground, + }) +} diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index 211b5a70b..c134a0451 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -15,7 +15,7 @@ if [ ! 
-d "$KUBE_ROOT/_output" ]; then git clone https://github.com/kubernetes/kubernetes.git cd kubernetes make quick-release - mv _output/ $KUBE_ROOT + mv _output/ $KUBE_ROOT fi From 227f5f0739ba520e9346422b0a9ee032362dddb9 Mon Sep 17 00:00:00 2001 From: TommyLike Date: Sat, 20 Apr 2019 14:24:07 +0800 Subject: [PATCH 4/4] Resolve conflict --- Makefile | 3 ++- test/e2e/kube-batch/benchmark.go | 4 ++-- test/e2e/kube-batch/metric_util.go | 18 +++++++++++++++++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d6e41c1e4..551ed7789 100644 --- a/Makefile +++ b/Makefile @@ -75,8 +75,9 @@ coverage: KUBE_COVER=y hack/make-rules/test.sh $(WHAT) $(TESTS) benchmark: + #NOTE: !Only GCE platform is supported now test/kubemark/start-kubemark.sh - go test ./test/e2e -v -timeout 30m --ginkgo.focus="Feature:Performance" + go test ./test/e2e/kube-batch -v -timeout 30m --ginkgo.focus="Feature:Performance" test/kubemark/stop-kubemark.sh clean: diff --git a/test/e2e/kube-batch/benchmark.go b/test/e2e/kube-batch/benchmark.go index fed9b1654..9b0012cb6 100644 --- a/test/e2e/kube-batch/benchmark.go +++ b/test/e2e/kube-batch/benchmark.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package e2e +package kube_batch import ( "fmt" diff --git a/test/e2e/kube-batch/metric_util.go b/test/e2e/kube-batch/metric_util.go index 5a16a6022..d118b7878 100644 --- a/test/e2e/kube-batch/metric_util.go +++ b/test/e2e/kube-batch/metric_util.go @@ -1,4 +1,20 @@ -package e2e +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube_batch import ( "bytes"