support eks provider in e2e
cofyc committed Feb 19, 2020
1 parent 3d54d47 commit 2554f4a
Showing 4 changed files with 182 additions and 41 deletions.
117 changes: 83 additions & 34 deletions hack/e2e.sh
@@ -46,31 +46,34 @@ Usage: hack/e2e.sh [-h] -- [extra test args]
Environments:
PROVIDER Kubernetes provider, e.g. kind, gke, eks, defaults: kind
DOCKER_REGISTRY image docker registry
IMAGE_TAG image tag
CLUSTER the name of e2e cluster, defaults: tidb-operator
KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config
SKIP_BUILD skip building binaries
SKIP_IMAGE_BUILD skip build and push images
SKIP_UP skip starting the cluster
SKIP_DOWN skip shutting down the cluster
SKIP_TEST skip running the test
KUBE_VERSION the version of Kubernetes to test against
KUBE_WORKERS the number of worker nodes (excludes master nodes), defaults: 3
DOCKER_IO_MIRROR configure mirror for docker.io
GCR_IO_MIRROR configure mirror for gcr.io
QUAY_IO_MIRROR configure mirror for quay.io
KIND_DATA_HOSTPATH (kind only) the host path of data directory for kind cluster, defaults: none
GCP_PROJECT (gke only) the GCP project to run in
GCP_SERVICE_ACCOUNT (gke only) the GCP service account to use
GCP_REGION (gke only) the GCP region, if specified a regional cluster is created
GCP_ZONE (gke only) the GCP zone, if specified a zonal cluster is created
GCP_SSH_PRIVATE_KEY (gke only) the path to the private ssh key
GCP_SSH_PUBLIC_KEY (gke only) the path to the public ssh key
AWS_ACCESS_KEY_ID (eks only) the aws access key id
AWS_SECRET_ACCESS_KEY (eks only) the aws secret access key
AWS_REGION (eks only) the aws region
GINKGO_NODES ginkgo nodes to run specs, defaults: 1
GINKGO_PARALLEL if set to `y`, will run specs in parallel; the number of nodes will be the number of CPUs
GINKGO_NO_COLOR if set to `y`, suppress color output in default reporter
Examples:
@@ -103,10 +106,7 @@ Examples:
5) run e2e with gke provider locally
You need to install the Google Cloud SDK first, then prepare a GCP service
account and configure ssh key pairs
The GCP service account must be created with the following permissions:
- Compute Network Admin
- Kubernetes Engine Admin
@@ -121,11 +121,32 @@ Examples:
Then run with the following additional GCP-specific environments:
export GCP_PROJECT=<project>
export GCP_SERVICE_ACCOUNT=<path-to-gcp-service-account>
export GCP_ZONE=us-central1-b
PROVIDER=gke ./hack/e2e.sh -- <e2e args>
If you run this outside of the dev container started by
./hack/run-in-container.sh, the Google Cloud SDK must be installed on your
machine.
6) run e2e with eks provider locally
You need to configure your AWS credentials and region, or set them via the
following environment variables:
export AWS_ACCESS_KEY_ID=<your-aws-access-key-id>
export AWS_SECRET_ACCESS_KEY=<your-aws-secret-access-key>
export AWS_REGION=<your-aws-region>
Then run e2e with the eks provider:
PROVIDER=eks ./hack/e2e.sh -- <e2e args>
If you run this outside of the dev container started by
./hack/run-in-container.sh, the AWS CLI must be installed on your
machine.
EOF

@@ -166,6 +187,9 @@ GCP_REGION=${GCP_REGION:-}
GCP_ZONE=${GCP_ZONE:-}
GCP_SSH_PRIVATE_KEY=${GCP_SSH_PRIVATE_KEY:-}
GCP_SSH_PUBLIC_KEY=${GCP_SSH_PUBLIC_KEY:-}
AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-}
AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-}
AWS_REGION=${AWS_REGION:-}
KUBE_VERSION=${KUBE_VERSION:-v1.12.10}
KUBE_WORKERS=${KUBE_WORKERS:-3}
DOCKER_IO_MIRROR=${DOCKER_IO_MIRROR:-}
@@ -186,6 +210,9 @@ echo "GCP_PROJECT: $GCP_PROJECT"
echo "GCP_SERVICE_ACCOUNT: $GCP_SERVICE_ACCOUNT"
echo "GCP_REGION: $GCP_REGION"
echo "GCP_ZONE: $GCP_ZONE"
echo "AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID"
echo "AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY"
echo "AWS_REGION: $AWS_REGION"
echo "KUBE_VERSION: $KUBE_VERSION"
echo "KUBE_WORKERS: $KUBE_WORKERS"
echo "DOCKER_IO_MIRROR: $DOCKER_IO_MIRROR"
@@ -384,7 +411,7 @@ elif [ "$PROVIDER" == "gke" ]; then
echo "error: GCP_REGION or GCP_ZONE cannot be both set"
exit 1
fi
echo "info: preparing ssh keypairs for GCP"
echo "info: preparing ssh keypairs for GCP"
if [ ! -d ~/.ssh ]; then
mkdir ~/.ssh
fi
@@ -414,6 +441,23 @@ elif [ "$PROVIDER" == "gke" ]; then
--zone "$GCP_ZONE"
)
fi
elif [ "$PROVIDER" == "eks" ]; then
hack::ensure_aws_k8s_tester
if [ -n "$AWS_REGION" ]; then
aws configure set default.region "$AWS_REGION"
fi
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"
fi
export AWS_K8S_TESTER_EKS_NAME=$CLUSTER
export AWS_K8S_TESTER_EKS_CONFIG_PATH=/tmp/kubetest2.eks.$CLUSTER
export AWS_K8S_TESTER_EKS_ADD_ON_NLB_HELLO_WORLD_ENABLE="false"
export AWS_K8S_TESTER_EKS_ADD_ON_MANAGED_NODE_GROUPS_MNGS='{"aws-k8s-tester-tidb-operator-mng":{"name":"aws-k8s-tester-tidb-operator-mng","ami-type":"AL2_x86_64","asg-min-size":3,"asg-max-size":3,"asg-desired-capacity":3,"instance-types":["c5.xlarge"],"volume-size":40}}'
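# The managed node group JSON above pins a single group to exactly 3 nodes
# (asg min = max = desired = 3) of c5.xlarge with 40 GiB volumes, matching
# the KUBE_WORKERS default of 3. Note the group name is fixed here rather
# than derived from $CLUSTER.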
# override KUBECONFIG
KUBECONFIG=$AWS_K8S_TESTER_EKS_CONFIG_PATH.kubeconfig.yaml
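# With the default CLUSTER=tidb-operator this resolves to
# /tmp/kubetest2.eks.tidb-operator.kubeconfig.yaml, which kubetest2-eks is
# expected to write when it brings the cluster up.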
else
echo "error: unsupported provider '$PROVIDER'"
exit 1
@@ -429,6 +473,11 @@ export TIDB_OPERATOR_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
export E2E_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator-e2e:${IMAGE_TAG}
export PATH=$PATH:$OUTPUT_BIN

# Environments for kubetest2
if [ -n "${REPORT_DIR:-}" ]; then
export ARTIFACTS=${REPORT_DIR:-}
fi
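# kubetest2 conventionally writes test artifacts (e.g. junit XML) under
# $ARTIFACTS; exporting it here is what lets CI collect results from
# REPORT_DIR (an assumption based on kubetest/kubetest2 conventions).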

hack::ensure_kubetest2
echo "info: run 'kubetest2 ${kubetest2_args[@]} -- hack/run-e2e.sh $@'"
$KUBETEST2_BIN ${kubetest2_args[@]} -- hack/run-e2e.sh "$@"
32 changes: 28 additions & 4 deletions hack/lib.sh
@@ -34,10 +34,13 @@ HELM_BIN=$OUTPUT_BIN/helm
HELM_VERSION=${HELM_VERSION:-2.9.1}
KIND_VERSION=${KIND_VERSION:-0.7.0}
KIND_BIN=$OUTPUT_BIN/kind
KUBETEST2_VERSION=v0.0.2+8e0a95176a86e6bdbc0b5aa18c40fa5542828d15
KUBETEST2_GKE_VERSION=v0.0.2+5b16ede1983db0dfc384145f4c559db15f80c14f
KUBETEST2_EKS_VERSION=v0.0.2+ddbcc8482c65d8f230511a73b1188791af96ed6b
KUBETEST2_KIND_VERSION=v0.0.2+a81b87d6155611e5561c9642d25a14d4911fb8a7
KUBETEST2_BIN=$OUTPUT_BIN/kubetest2
AWS_K8S_TESTER_VERSION=v0.6.2
AWS_K8S_TESTER_BIN=$OUTPUT_BIN/aws-k8s-tester

test -d "$OUTPUT_BIN" || mkdir -p "$OUTPUT_BIN"

@@ -168,7 +171,7 @@ function hack::__ensure_kubetest2() {
if hack::__verify_kubetest2 $n $h; then
return 0
fi
local tmpfile=$(mktemp)
trap "test -f $tmpfile && rm $tmpfile" RETURN
curl --retry 10 -L -o - https://github.com/cofyc/kubetest2/releases/download/$v/$n.gz | gunzip > $tmpfile
mv $tmpfile $OUTPUT_BIN/$n
@@ -179,4 +182,25 @@ function hack::ensure_kubetest2() {
hack::__ensure_kubetest2 kubetest2 $KUBETEST2_VERSION
hack::__ensure_kubetest2 kubetest2-gke $KUBETEST2_GKE_VERSION
hack::__ensure_kubetest2 kubetest2-kind $KUBETEST2_KIND_VERSION
hack::__ensure_kubetest2 kubetest2-eks $KUBETEST2_EKS_VERSION
}

function hack::verify_aws_k8s_tester() {
if test -x $AWS_K8S_TESTER_BIN; then
[[ "$($AWS_K8S_TESTER_BIN version | awk '/ReleaseVersion/ {print $2}')" == "$AWS_K8S_TESTER_VERSION" ]]
return
fi
return 1
}

function hack::ensure_aws_k8s_tester() {
if hack::verify_aws_k8s_tester; then
return
fi
local DOWNLOAD_URL=https://github.com/aws/aws-k8s-tester/releases/download
local tmpfile=$(mktemp)
trap "test -f $tmpfile && rm $tmpfile" RETURN
curl --retry 10 -L -o $tmpfile $DOWNLOAD_URL/$AWS_K8S_TESTER_VERSION/aws-k8s-tester-$AWS_K8S_TESTER_VERSION-$OS-$ARCH
mv $tmpfile $AWS_K8S_TESTER_BIN
chmod +x $AWS_K8S_TESTER_BIN
}
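
# Usage sketch: callers run hack::ensure_aws_k8s_tester once to download the
# pinned release into $OUTPUT_BIN; afterwards "$AWS_K8S_TESTER_BIN version"
# should report ReleaseVersion $AWS_K8S_TESTER_VERSION.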
66 changes: 65 additions & 1 deletion hack/run-e2e.sh
@@ -114,7 +114,7 @@ for ((i = 1; i <= 32; i++)) {
}
EOF
done
elif [ "$PROVIDER" == "gke" ]; then
# disks are created under /mnt/stateful_partition directory
# https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem
for n in $($KUBECTL_BIN --context $KUBECONTEXT get nodes -ojsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'); do
@@ -136,12 +136,47 @@ for ((i = 1; i <= 32; i++)) {
}
'"'"
done
elif [ "$PROVIDER" == "eks" ]; then
while IFS=$'\n' read -r line; do
read -r id dns <<< $line
echo "info: prepare disks on $dns"
ssh -T -o "StrictHostKeyChecking no" -i ~/.ssh/kube_aws_rsa ec2-user@$dns <<'EOF'
sudo bash -c '
test -d /mnt/disks || mkdir -p /mnt/disks
df -h /mnt/disks
if mountpoint /mnt/disks &>/dev/null; then
echo "info: /mnt/disks is a mountpoint"
else
echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
fi
cd /mnt/disks
for ((i = 1; i <= 32; i++)) {
if [ ! -d vol$i ]; then
mkdir vol$i
fi
if ! mountpoint vol$i &>/dev/null; then
mount --bind vol$i vol$i
fi
}
'
EOF
done <<< "$(e2e::__eks_instances)"
fi
echo "info: installing local-volume-provisioner"
$KUBECTL_BIN --context $KUBECONTEXT apply -f ${ROOT}/manifests/local-dind/local-volume-provisioner.yaml
e2e::__wait_for_ds kube-system local-volume-provisioner
}

function e2e::__eks_instances() {
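# Prints one "<InstanceId> <PublicDnsName>" pair per cluster node, e.g.
# "i-0123456789abcdef0 ec2-18-144-0-1.us-west-2.compute.amazonaws.com"
# (values are illustrative).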
aws ec2 describe-instances --filter Name=tag:eks:cluster-name,Values=$CLUSTER --query 'Reservations[*].Instances[*].{InstanceId:InstanceId,PublicDnsName:PublicDnsName}' --output text
}

function e2e::__ecr_url() {
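# Derives the private ECR registry host for the current credentials, e.g.
# 123456789012.dkr.ecr.us-west-2.amazonaws.com (account id and region are
# illustrative).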
local account_id=$(aws sts get-caller-identity --query Account --output text)
local region=$(aws configure get region)
echo "${account_id}.dkr.ecr.${region}.amazonaws.com"
}

function e2e::get_kube_version() {
$KUBECTL_BIN --context $KUBECONTEXT version --short | awk '/Server Version:/ {print $3}'
}
@@ -186,6 +221,28 @@ function e2e::image_load() {
docker push $GCP_E2E_IMAGE
TIDB_OPERATOR_IMAGE=$GCP_TIDB_OPERATOR_IMAGE
E2E_IMAGE=$GCP_E2E_IMAGE
elif [ "$PROVIDER" == "eks" ]; then
for repoName in e2e/tidb-operator e2e/tidb-operator-e2e; do
local ret=0
aws ecr describe-repositories --repository-names $repoName || ret=$?
if [ $ret -ne 0 ]; then
echo "info: creating repository $repoName"
aws ecr create-repository --repository-name $repoName
fi
done
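# Note: "aws ecr describe-repositories" exits non-zero when the repository
# does not exist; capturing the status with "|| ret=$?" above keeps an
# errexit shell (if set -e is enabled) from aborting before we can create it.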
local ecrURL=$(e2e::__ecr_url)
echo "info: logging in $ecrURL"
aws ecr get-login-password | docker login --username AWS --password-stdin $ecrURL
AWS_TIDB_OPERATOR_IMAGE=$ecrURL/e2e/tidb-operator:$IMAGE_TAG
AWS_E2E_IMAGE=$ecrURL/e2e/tidb-operator-e2e:$IMAGE_TAG
docker tag $TIDB_OPERATOR_IMAGE $AWS_TIDB_OPERATOR_IMAGE
docker tag $E2E_IMAGE $AWS_E2E_IMAGE
echo "info: pushing $AWS_TIDB_OPERATOR_IMAGE"
docker push $AWS_TIDB_OPERATOR_IMAGE
echo "info: pushing $AWS_E2E_IMAGE"
docker push $AWS_E2E_IMAGE
TIDB_OPERATOR_IMAGE=$AWS_TIDB_OPERATOR_IMAGE
E2E_IMAGE=$AWS_E2E_IMAGE
else
echo "info: unsupported provider '$PROVIDER', skip loading images"
fi
@@ -262,6 +319,13 @@ docker_args=(
--env KUBECONTEXT=$KUBECONTEXT
)

if [ "$PROVIDER" == "eks" ]; then
# AWS credentials are required to get a token for EKS
docker_args+=(
-v $HOME/.aws:/root/.aws
)
fi

if [ -n "$REPORT_DIR" ]; then
docker_args+=(
-v $REPORT_DIR:$REPORT_DIR
Expand Down
8 changes: 6 additions & 2 deletions tests/images/e2e/Dockerfile
@@ -1,9 +1,10 @@
FROM debian:buster-slim

ENV KUBECTL_VERSION=v1.12.2
ENV HELM_VERSION=v2.9.1

RUN apt-get update && \
apt-get install -y ca-certificates curl git openssl default-mysql-client unzip
RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \
-o /usr/local/bin/kubectl && \
chmod +x /usr/local/bin/kubectl && \
@@ -13,6 +14,9 @@ RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VER
mv linux-amd64/helm /usr/local/bin/helm && \
rm -rf linux-amd64 && \
rm helm-${HELM_VERSION}-linux-amd64.tar.gz
RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install
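# NOTE: the installer leaves awscliv2.zip and the extracted aws/ directory in
# the image; appending "&& rm -rf awscliv2.zip aws" to the RUN above would
# slim the final layer.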

ADD tidb-operator /charts/e2e/tidb-operator
ADD tidb-cluster /charts/e2e/tidb-cluster