Skip to content

Commit

Permalink
[PDP-3120] Add GKE support (#7)
Browse files Browse the repository at this point in the history
* [PDP-3120] add GKE support

And fix some bugs

* adjust test script

* add vcluster binary

* upgrade vcluster to 0.20.0

* fix typo
  • Loading branch information
syan-tibco committed Aug 16, 2024
1 parent caa0444 commit 7cb4c92
Show file tree
Hide file tree
Showing 24 changed files with 1,114 additions and 66 deletions.
2 changes: 1 addition & 1 deletion charts/common-dependency/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

apiVersion: v2
name: common-dependency
version: "1.0.8"
version: "1.0.10"
appVersion: "1.0.0"
description: common-dependency
type: application
Expand Down
91 changes: 91 additions & 0 deletions charts/common-dependency/scripts/_functions.sh
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,65 @@ function aks-pp-assume-role() {
fi
}

# gcp-federation-assume-role uses the AWS GCP federation role to authenticate
# gcloud against the target GCP project (GCPFederation --> aws-federation).
# Globals:
#   GCP_PROJECT_ID: (required) target GCP project id
#   PIPELINE_USE_LOCAL_CREDS: when "true", skip the assume-role flow entirely
#   PIPELINE_GCP_FEDERATION_ROLE: AWS role assumed before the gcloud login
# Returns:
#   0 on success, 1 on any failed step
function gcp-federation-assume-role() {
  if [ -z "${GCP_PROJECT_ID}" ]; then
    common::err "GCP_PROJECT_ID is not set"
    return 1
  fi

  # local-creds mode: caller already has working gcloud credentials
  if [[ "${PIPELINE_USE_LOCAL_CREDS}" == "true" ]]; then
    common::debug "detect PIPELINE_USE_LOCAL_CREDS is true, skip assume role"
    return 0
  fi

  common::debug "assume AWS federation role: ${PIPELINE_GCP_FEDERATION_ROLE}"
  if ! pp-aws-assume-role "${PIPELINE_GCP_FEDERATION_ROLE}"; then
    common::err "pp-aws-assume-role error"
    return 1
  fi

  # login with the AWS->GCP workload-identity credential file
  common::debug "gcloud auth login"
  if ! gcloud auth login --brief --quiet --cred-file="${HOME}/.config/gcloud/aws_gcp_federation.json"; then
    common::err "gcp auth login error"
    return 1
  fi

  common::debug "set to GCP project: ${GCP_PROJECT_ID}"
  if ! gcloud config set project --quiet "${GCP_PROJECT_ID}"; then
    common::err "gcloud config set project error"
    return 1
  fi
}

# gcp-federation-k8s-cluster generates a kubeconfig entry for the target GKE
# cluster after federating into the GCP account.
# Globals:
#   CLUSTER_NAME: (required) GKE cluster name
#   GCP_REGION: (required) cluster location, passed to --zone (see NOTE below)
#   GCP_PROJECT_ID: (required) consumed via gcp-federation-assume-role
# Returns:
#   0 on success, 1 on any failed step
function gcp-federation-k8s-cluster() {
  if ! gcp-federation-assume-role; then
    common::err "gcp-federation-assume-role error"
    return 1
  fi

  if [ -z "${CLUSTER_NAME}" ]; then
    common::err "Please set CLUSTER_NAME environment variable"
    return 1
  fi

  if [ -z "${GCP_REGION}" ]; then
    common::err "Please set GCP_REGION environment variable"
    return 1
  fi

  # NOTE(review): GCP_REGION is passed to --zone; gcloud expects --region for
  # regional GKE clusters — confirm whether the provisioned clusters are zonal.
  if ! gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone "${GCP_REGION}" --project "${GCP_PROJECT_ID}"; then
    common::err "generate kubeconfig for GKE ${GCP_PROJECT_ID}/${CLUSTER_NAME} failed"
    return 1
  fi
}

#######################################
# common::assume_role will automatically detect if it is on-prem, Azure or AWS account and try to connect to target account
# after connecting to target account if CLUSTER_NAME is set, it will try to refresh kubeconfig token for CLUSTER_NAME
Expand Down Expand Up @@ -371,6 +430,34 @@ function common::assume_role() {
return 0
fi

# GCP use case
if echo "${_account}" | grep -q "gcp-"; then
common::debug "Looks like select GCP account ${_account}"

  # check if we have get-gcp-project function
if declare -F get-gcp-project > /dev/null; then
if ! get-gcp-project "${_account}"; then
common::err "get GCP project error"
return 1
fi
fi

# if we set CLUSTER_NAME then we will try to generate kubeconfig
if [[ -n "${_cluster_name}" ]]; then
common::debug "detect CLUSTER_NAME is set to ${_cluster_name}"
gcp-federation-k8s-cluster
return $?
else
# if not then we will just assume to the GCP account
if ! gcp-federation-assume-role; then
common::err "gcp-federation-assume-role error"
return 1
fi
fi

return 0
fi

# azure use case
# the pattern is azure-72f677ccb9aa, the last section of Azure sub id
if echo "${_account}" | grep -q "azure-"; then
Expand Down Expand Up @@ -1064,8 +1151,12 @@ function init() {
exit 1
fi

# pipeline outbound ip address
export PIPELINE_OUTBOUND_IP_ADDRESS=${PIPELINE_OUTBOUND_IP_ADDRESS:-"${TIBCO_PROVISIONER_OUTBOUND_IP_ADDRESS}"}
# setup cloud account roles
export PIPELINE_AWS_MANAGED_ACCOUNT_ROLE=${PIPELINE_AWS_MANAGED_ACCOUNT_ROLE:-"${TIBCO_AWS_CONTROLLED_ACCOUNT_ROLE}"}
# Cloud provider federation roles
export PIPELINE_GCP_FEDERATION_ROLE=${PIPELINE_GCP_FEDERATION_ROLE:-"${TIBCO_GCP_FEDERATION_ROLE}"}
export PIPELINE_AZURE_FEDERATION_ROLE=${PIPELINE_AZURE_FEDERATION_ROLE:-"${TIBCO_AZURE_FEDERATION_ROLE}"}
export PIPELINE_AWS_COGNITO_IDENTITY_POOL=${PIPELINE_AWS_COGNITO_IDENTITY_POOL:-"${TIBCO_AWS_COGNITO_IDENTITY_POOL}"}
export PIPELINE_AWS_COGNITO_IDENTITY_POOL_LOGINS=${PIPELINE_AWS_COGNITO_IDENTITY_POOL_LOGINS:-"${TIBCO_AWS_COGNITO_IDENTITY_POOL_LOGINS}"}
Expand Down
2 changes: 1 addition & 1 deletion charts/helm-install/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

apiVersion: v2
name: helm-install
version: "1.0.5"
version: "1.0.6"
appVersion: "1.0.0"
description: helm-install
type: application
Expand Down
4 changes: 2 additions & 2 deletions charts/helm-install/scripts/_funcs_helm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -331,7 +331,7 @@ function process_chart_flags() {
local _values_flag=${5}

# start of helm command
echo -n "${HELM_COMMAND_LINE} " > "${_install_cmd_file}"
echo -n "${HELM_COMMAND_LINE} " >> "${_install_cmd_file}"

local _chart_debug=""
_chart_debug=$(echo "${_chart_flags_section}" | common::yq4-get '.debug')
Expand Down Expand Up @@ -442,7 +442,7 @@ function installChart() {
return 0
fi

local _install_cmd_file=chart-install-cmd.txt
local _install_cmd_file=${PIPELINE_HELM_INSTALL_CMD_FILE_NAME:-"chart-install-cmd.txt"}

process_chart_flags "${_chart_name}" "${_chart_namespace}" "${_chart_flags_section}" "${_install_cmd_file}" "${_values_flag}"

Expand Down
2 changes: 1 addition & 1 deletion charts/provisioner-config-local/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ apiVersion: v2
name: provisioner-config-local
description: Platform Provisioner local config
type: application
version: 1.0.29
version: 1.0.30
appVersion: "2.0.0"
home: https://github.com/TIBCOSoftware/tp-helm-charts
maintainers:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,11 @@ options:
guiType: input
reference: "meta.guiEnv.GUI_CP_CONTAINER_REGISTRY_PASSWORD"
description: "The container registry password to deploy CP"
- name: "GUI_CP_CONTAINER_REGISTRY_REPOSITORY"
type: string
guiType: input
reference: "meta.guiEnv.GUI_CP_CONTAINER_REGISTRY_REPOSITORY"
description: "The container registry repository to deploy CP"
- name: "GUI_TP_TLS_CERT"
type: string
guiType: input
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ meta:
GUI_CP_CONTAINER_REGISTRY: csgprduswrepoedge.jfrog.io
GUI_CP_CONTAINER_REGISTRY_USERNAME: ""
GUI_CP_CONTAINER_REGISTRY_PASSWORD: ""
GUI_CP_CONTAINER_REGISTRY_REPOSITORY: tibco-platform-docker-prod
# TLS
GUI_TP_TLS_CERT: ""
GUI_TP_TLS_KEY: ""
Expand Down Expand Up @@ -58,6 +59,7 @@ meta:
CP_CONTAINER_REGISTRY: ${GUI_CP_CONTAINER_REGISTRY:-"csgprduswrepoedge.jfrog.io"}
CP_CONTAINER_REGISTRY_USERNAME: "${GUI_CP_CONTAINER_REGISTRY_USERNAME}"
CP_CONTAINER_REGISTRY_PASSWORD: "${GUI_CP_CONTAINER_REGISTRY_PASSWORD}"
CP_CONTAINER_REGISTRY_REPOSITORY: ${GUI_CP_CONTAINER_REGISTRY_REPOSITORY:-"tibco-platform-docker-prod"}
# node
CP_NODE_CIDR: 10.180.0.0/16
CP_POD_CIDR: 10.180.0.0/16
Expand Down Expand Up @@ -91,7 +93,7 @@ meta:
TP_CERTIFICATE_CLUSTER_ISSUER: ${GUI_TP_CERTIFICATE_CLUSTER_ISSUER:-"tp-prod"} # the cluster issuer for tp-certificate
# CP version
CP_PLATFORM_BOOTSTRAP_VERSION: ${GUI_CP_PLATFORM_BOOTSTRAP_VERSION:-1.2.23} # 1.2 GA release. use ^1.0.0 for latest
CP_PLATFORM_BASE_VERSION: ${CP_PLATFORM_BASE_VERSION:-1.2.140} # 1.2 GA release. use ^1.0.0 for latest
CP_PLATFORM_BASE_VERSION: ${GUI_CP_PLATFORM_BASE_VERSION:-1.2.140} # 1.2 GA release. use ^1.0.0 for latest
# flow control
CP_CREATE_NAMESPACE: true
CP_INSTALL_MAILDEV: true
Expand Down Expand Up @@ -427,9 +429,10 @@ helmCharts:
fluentbit:
enabled: false
containerRegistry:
password: "${CP_CONTAINER_REGISTRY_PASSWORD}"
url: ${CP_CONTAINER_REGISTRY}
password: "${CP_CONTAINER_REGISTRY_PASSWORD}"
username: "${CP_CONTAINER_REGISTRY_USERNAME}"
repository: "${CP_CONTAINER_REGISTRY_REPOSITORY}"
controlPlaneInstanceId: ${CP_INSTANCE_ID}
enableLogging: true
serviceAccount: ${CP_INSTANCE_ID}-sa
Expand Down
3 changes: 2 additions & 1 deletion dev/platform-provisioner-install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ if [[ ${PIPELINE_SKIP_PROVISIONER_UI} == "false" ]]; then
exit 1
fi
[[ -z "${PIPELINE_GUI_DOCKER_IMAGE_USERNAME}" ]] && export PIPELINE_GUI_DOCKER_IMAGE_USERNAME=${PIPELINE_GUI_DOCKER_IMAGE_USERNAME:-"AWS"}
[[ -z "${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL}" ]] && export PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL=${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL:-"${PIPELINE_GUI_DOCKER_IMAGE_REPO}/stratosphere/cic2-provisioner-webui"}
fi

# The tekton version to install
Expand Down Expand Up @@ -151,7 +152,7 @@ fi
# install provisioner web ui
helm upgrade --install -n "${PIPELINE_NAMESPACE}" platform-provisioner-ui platform-provisioner-ui --repo "${PLATFORM_PROVISIONER_PIPELINE_REPO}" \
--version ^1.0.0 \
--set image.repository="${PIPELINE_GUI_DOCKER_IMAGE_REPO}"/stratosphere/cic2-provisioner-webui \
--set image.repository="${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL}" \
--set image.tag=latest \
--set "imagePullSecrets[0].name=${_image_pull_secret_name}" \
--set guiConfig.onPremMode=true \
Expand Down
3 changes: 2 additions & 1 deletion dev/platform-provisioner-pipelinerun.sh
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,8 @@ keys='$account, $region, $pipeline_service_account_name $user_name, $random_numb
recipe_replaced=$(envsubst "${keys}" <<< "${recipe_template}")
echo "create tekton ${pipeline_name} pipelinerun ${account}-${random_number} for ${user_name}"
#echo "${recipe_replaced}"
if ! kubectl apply -f <(echo "${recipe_replaced}"); then
# works for windows git bash as well
if ! echo "${recipe_replaced}" | kubectl apply -f -; then
echo "kubectl apply error"
exit 1
fi
144 changes: 144 additions & 0 deletions dev/platform-provisioner-test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
#!/bin/bash

#
# © 2024 Cloud Software Group, Inc.
# All Rights Reserved. Confidential & Proprietary.
#

# the idea of this script is to provide same run time env as tekton pipeline
#######################################
# setupRepo this will clone repo and copy files to current folder
# Globals:
# ACCOUNT: the aws account you want to assume to
# REGION: the cloud region
# AWS_PROFILE: the aws profile; we normally needs to do AWS sso login to update this profile
# GITHUB_TOKEN: the github token
# PIPELINE_PATH: the pipeline path
# PIPELINE_TRIGGER_RUN_SH: true or other string if true, will run task directly. if other string, will just go to bash
# PIPELINE_INPUT_RECIPE: the input file name; default is recipe.yaml
# PIPELINE_MOCK: true or other string if true, will mock run pipeline. (only run meta part)
# PIPELINE_LOG_DEBUG: true or other string if true, will print pipeline debug log
# PIPELINE_VALIDATE_INPUT: true or other string if true, will validate input against cue schema
# PIPELINE_CHECK_DOCKER_STATUS: true only when set to false to skip check docker status
# PIPELINE_INITIAL_ASSUME_ROLE: true only when set to false to skip initial assume to target account
# PIPELINE_USE_LOCAL_CREDS: false only when set to true to use local creds
# PIPELINE_FUNCTION_INIT: true only when set to false to skip function init which is used to load Environment specific functions and envs for pipeline
# PIPELINE_AWS_MANAGED_ACCOUNT_ROLE: the role to assume to. We will use current AWS role to assume to this role to perform the task. current role --> "arn:aws:iam::${_account}:role/${PIPELINE_AWS_MANAGED_ACCOUNT_ROLE}"
# Arguments:
# None
# Returns:
# 0 if thing was deleted, non-zero on error
# Notes:
# This script needs to be run in the dev folder
# Recipe: The full path of the recipe file. The script will automatically load this recipe as input
# Docker run command: It will mount necessary folder and bring environment variables to the container.
# It will also run the command defined in all pipeline task.
# It will also run the task if PIPELINE_TRIGGER_RUN_SH is set to true.
# Samples:
# export PIPELINE_INPUT_RECIPE="recipe.yaml"
# export ACCOUNT="on-prem"
#   ./platform-provisioner-test.sh generic-runner
#######################################

set +x

# default docker image used to emulate the tekton pipeline runtime
[[ -z "${PIPELINE_DOCKER_IMAGE}" ]] && export PIPELINE_DOCKER_IMAGE=${PIPELINE_DOCKER_IMAGE:-"platform-provisioner:latest"}

# setup dev path; PIPELINE_PATH defaults to the repo root (parent of dev/)
DEV_PATH=$(pwd)
cd .. || exit
[[ -z "${PIPELINE_PATH}" ]] && PIPELINE_PATH=$(pwd)
cd "${DEV_PATH}" || exit

# we need to set REGION, otherwise the INPUT will not be loaded
[[ -z "${REGION}" ]] && export REGION="us-west-2"

[[ -z "${PIPELINE_INPUT_RECIPE}" ]] && export PIPELINE_INPUT_RECIPE="recipe.yaml"
# derive the pipeline name from the recipe's "kind" field when not given
[[ -z "${PIPELINE_NAME}" ]] && export PIPELINE_NAME=$(yq ".kind | select(. != null)" "${PIPELINE_INPUT_RECIPE}")
[[ -z "${PIPELINE_TRIGGER_RUN_SH}" ]] && export PIPELINE_TRIGGER_RUN_SH="true"
[[ -z "${PIPELINE_LOG_DEBUG}" ]] && export PIPELINE_LOG_DEBUG="false"
# For local test; we enable this flag by default
[[ -z "${PIPELINE_USE_LOCAL_CREDS}" ]] && export PIPELINE_USE_LOCAL_CREDS="true"
# For this script; we need to skip check docker status. The docker compose should set to true
[[ -z "${PIPELINE_MOCK}" ]] && export PIPELINE_MOCK="false"
[[ -z "${PIPELINE_CHECK_DOCKER_STATUS}" ]] && export PIPELINE_CHECK_DOCKER_STATUS="false"
# we don't want to initial assume role for local run
[[ -z "${PIPELINE_INITIAL_ASSUME_ROLE}" ]] && export PIPELINE_INITIAL_ASSUME_ROLE="false"
[[ -z "${PIPELINE_VALIDATE_INPUT}" ]] && export PIPELINE_VALIDATE_INPUT="true"

# This case is to use the default kubeconfig file. The default kubeconfig file is ~/.kube/config.
# We will mount this file to the container and rename to config-on-prem to avoid conflict with container kubeconfig file
[[ -z "${PIPELINE_ON_PREM_KUBECONFIG}" ]] && export PIPELINE_ON_PREM_KUBECONFIG="false"

# this case is used for on prem cluster; user will specify kubeconfig file name
if [[ -z "${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME}" ]]; then
  # harmless placeholder so the docker --mount flag below is still valid
  export DOCKER_MOUNT_KUBECONFIG_FILE_NAME="target=/tmp1"
else
  export DOCKER_MOUNT_KUBECONFIG_FILE_NAME="type=bind,source=${HOME}/.kube/${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME},target=/root/.kube/${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME}"
fi

# this is used for k8s on docker for mac: resolve the node InternalIP so the
# container can reach kubernetes.docker.internal
if [[ "${PIPELINE_ON_PREM_DOCKER_FOR_MAC}" == "true" ]]; then
  DOCKER_FOR_MAC_NODE_IP=$(kubectl get nodes -o yaml | yq '.items[].status.addresses[] | select(.type == "InternalIP") | .address')
  export DOCKER_FOR_MAC_ADD_HOST="--add-host=kubernetes.docker.internal:${DOCKER_FOR_MAC_NODE_IP}"
else
  export DOCKER_FOR_MAC_ADD_HOST="--add-host=kubernetes.docker.internal:127.0.0.1"
fi

# will only pass the content of the recipe file to the container
# NOTE(review): "PIPLINE" is a typo for "PIPELINE", but the name is also
# referenced by the docker run command; rename all usages together, not here.
export PIPLINE_INPUT_RECIPE_CONTENT=""
[[ -f "${PIPELINE_INPUT_RECIPE}" ]] && PIPLINE_INPUT_RECIPE_CONTENT=$(cat "${PIPELINE_INPUT_RECIPE}")

echo "Working with pipeline: ${PIPELINE_PATH}/charts/${PIPELINE_NAME}"

echo "Using docker image: ${PIPELINE_DOCKER_IMAGE}"

# is used to export functions; so subshell can use it
#
# Launch the pipeline container with host networking, forwarding cloud
# credential directories (AWS/Azure/GCloud), the docker socket, kubeconfig
# files and the chart sources. The inline single-quoted `bash -c` script
# recreates the tekton task layout inside the container: it copies the
# common-dependency scripts plus the selected pipeline's scripts into
# ${SCRIPTS}, sources _functions.sh with `set -a` so functions and variables
# are exported to subshells, then either runs run.sh directly (when
# PIPELINE_TRIGGER_RUN_SH=true) or drops into an interactive bash.
# NOTE(review): PIPLINE_INPUT_RECIPE_CONTENT spelling matches the export
# above — keep the two in sync if the typo is ever fixed.
docker run -it --rm \
  --name provisioner-pipeline-task \
  --net host \
  -e ACCOUNT \
  -e REGION \
  -e AWS_PROFILE \
  -e AWS_ACCESS_KEY_ID \
  -e AWS_SECRET_ACCESS_KEY \
  -e AWS_SESSION_TOKEN \
  -e GITHUB_TOKEN \
  -e PIPELINE_TRIGGER_RUN_SH \
  -e PIPELINE_INPUT_RECIPE \
  -e PIPLINE_INPUT_RECIPE_CONTENT \
  -e PIPELINE_MOCK \
  -e PIPELINE_LOG_DEBUG \
  -e PIPELINE_CHECK_DOCKER_STATUS \
  -e PIPELINE_INITIAL_ASSUME_ROLE \
  -e PIPELINE_USE_LOCAL_CREDS \
  -e PIPELINE_FUNCTION_INIT \
  -e PIPELINE_VALIDATE_INPUT \
  -e PIPELINE_ON_PREM_KUBECONFIG \
  -e PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME \
  -e PIPELINE_AWS_MANAGED_ACCOUNT_ROLE \
  -e PIPELINE_NAME \
  -v `pwd`:/tmp/dev \
  -v "${HOME}"/.aws:/root/.aws \
  -v "${HOME}"/.azure:/root/.azure \
  -v "${HOME}"/.config/gcloud:/root/.config/gcloud \
  -v "${HOME}"/.kube/config:/root/.kube/config-on-prem \
  --mount "${DOCKER_MOUNT_KUBECONFIG_FILE_NAME}" \
  "${DOCKER_FOR_MAC_ADD_HOST}" \
  -v "${HOME}"/.docker:/root/.docker -v /var/run/docker.sock:/var/run/docker.sock \
  -v "${PIPELINE_PATH}"/charts:/tmp/charts \
  "${PIPELINE_DOCKER_IMAGE}" bash -c 'export REGION=${REGION:-"us-west-2"} \
  && declare -xr WORKING_PATH=/workspace \
  && declare -xr SCRIPTS=${WORKING_PATH}/task-scripts \
  && declare -xr INPUT="${PIPLINE_INPUT_RECIPE_CONTENT}" \
  && [[ -z ${PIPELINE_NAME} ]] && export PIPELINE_NAME=$(echo "${PIPLINE_INPUT_RECIPE_CONTENT}" | yq4 ".kind | select(. != null)" ) \
  && echo "using pipeline: ${PIPELINE_NAME}" \
  && [[ -z ${PIPELINE_NAME} ]] && { echo "PIPELINE_NAME can not be empty"; exit 1; } || true \
  && mkdir -p "${SCRIPTS}" \
  && cp -LR /tmp/charts/common-dependency/scripts/* "${SCRIPTS}" \
  && cp -LR /tmp/charts/${PIPELINE_NAME}/scripts/* "${SCRIPTS}" \
  && chmod +x "${SCRIPTS}"/*.sh \
  && cd "${SCRIPTS}" \
  && set -a && . _functions.sh && set +a \
  && [[ -z ${ACCOUNT} ]] && { echo "ACCOUNT can not be empty"; exit 1; } || true \
  && [[ "${PIPELINE_TRIGGER_RUN_SH}" == "true" ]] && ./run.sh ${ACCOUNT} ${REGION} "${INPUT}" || bash'
3 changes: 2 additions & 1 deletion dev/platform-provisioner.sh
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ echo "Using platform provisioner docker image: ${PIPELINE_DOCKER_IMAGE}"
# is used to export functions; so subshell can use it
docker run -it --rm \
--name provisioner-pipeline-task \
--net ${PIPELINE_CONTAINER_NETWORK} \
--net "${PIPELINE_CONTAINER_NETWORK}" \
-e ACCOUNT \
-e REGION \
-e AWS_PROFILE \
Expand All @@ -117,6 +117,7 @@ docker run -it --rm \
-e PIPELINE_NAME \
-v "${HOME}"/.aws:/root/.aws \
-v "${HOME}"/.azure:/root/.azure \
-v "${HOME}"/.config/gcloud:/root/.config/gcloud \
-v "${HOME}"/.kube/config:/root/.kube/config-on-prem \
--mount "${DOCKER_MOUNT_KUBECONFIG_FILE_NAME}" \
"${DOCKER_FOR_MAC_ADD_HOST}" \
Expand Down
Loading

0 comments on commit 7cb4c92

Please sign in to comment.