diff --git a/charts/common-dependency/Chart.yaml b/charts/common-dependency/Chart.yaml index b29592e..11d52ba 100644 --- a/charts/common-dependency/Chart.yaml +++ b/charts/common-dependency/Chart.yaml @@ -6,7 +6,7 @@ apiVersion: v2 name: common-dependency -version: "1.0.8" +version: "1.0.10" appVersion: "1.0.0" description: common-dependency type: application diff --git a/charts/common-dependency/scripts/_functions.sh b/charts/common-dependency/scripts/_functions.sh index 8721876..eaa7575 100755 --- a/charts/common-dependency/scripts/_functions.sh +++ b/charts/common-dependency/scripts/_functions.sh @@ -308,6 +308,65 @@ function aks-pp-assume-role() { fi } +# gcp-federation-assume-role uses the AWS GCP federation role to assume into the GCP account +# GCPFederation --> aws-federation +function gcp-federation-assume-role() { + if [ -z "${GCP_PROJECT_ID}" ]; then + common::err "GCP_PROJECT_ID is not set" + return 1 + fi + + if [[ "${PIPELINE_USE_LOCAL_CREDS}" == "true" ]]; then + common::debug "detected PIPELINE_USE_LOCAL_CREDS is true, skipping assume role" + return 0 + fi + + common::debug "assume AWS federation role: ${PIPELINE_GCP_FEDERATION_ROLE}" + pp-aws-assume-role "${PIPELINE_GCP_FEDERATION_ROLE}" + if [ $? -ne 0 ]; then + common::err "pp-aws-assume-role error" + return 1 + fi + + common::debug "gcloud auth login" + gcloud auth login --brief --quiet --cred-file="${HOME}/.config/gcloud/aws_gcp_federation.json" + if [ $? -ne 0 ]; then + common::err "gcloud auth login error" + return 1 + fi + + common::debug "set to GCP project: ${GCP_PROJECT_ID}" + gcloud config set project --quiet "${GCP_PROJECT_ID}" + if [ $? -ne 0 ]; then + common::err "gcloud config set project error" + return 1 + fi +} + +# gcp-federation-k8s-cluster generates a GKE kubeconfig +function gcp-federation-k8s-cluster() { + if ! gcp-federation-assume-role; then + common::err "gcp-federation-assume-role error" + return 1 + fi + + if [ -z "${CLUSTER_NAME}" ]; then + common::err "Please set CLUSTER_NAME environment variable" + return 1 + fi + + if [ -z "${GCP_REGION}" ]; then + common::err "Please set GCP_REGION environment variable" + return 1 + fi + + gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone "${GCP_REGION}" --project "${GCP_PROJECT_ID}" + if [ $? -ne 0 ]; then + common::err "generate kubeconfig for GKE ${GCP_PROJECT_ID}/${CLUSTER_NAME} failed" + return 1 + fi +} +
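The `--cred-file` above points at a workload identity federation credential configuration. As a minimal sketch (an assumption for illustration, not part of this change; the project number, pool, provider, and service-account names are placeholders), the file `gcloud auth login --cred-file` consumes looks roughly like:

```bash
# hypothetical credential config for AWS -> GCP workload identity federation
cat > "${HOME}/.config/gcloud/aws_gcp_federation.json" <<'EOF'
{
  "type": "external_account",
  "audience": "//iam.googleapis.com/projects/123456789/locations/global/workloadIdentityPools/aws-pool/providers/aws-provider",
  "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
  "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/pipeline-sa@my-project.iam.gserviceaccount.com:generateAccessToken",
  "token_url": "https://sts.googleapis.com/v1/token",
  "credential_source": {
    "environment_id": "aws1",
    "region_url": "http://169.254.169.254/latest/meta-data/placement/availability-zone",
    "url": "http://169.254.169.254/latest/meta-data/iam/security-credentials",
    "regional_cred_verification_url": "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
  }
}
EOF
```

 ####################################### # common::assume_role will automatically detect if it is on-prem, Azure or AWS account and try to connect to target account # after connecting to target account if CLUSTER_NAME is set, it will try to refresh kubeconfig token for CLUSTER_NAME @@ -371,6 +430,34 @@ function common::assume_role() { return 0 fi + # GCP use case + if echo "${_account}" | grep -q "gcp-"; then + common::debug "Looks like GCP account ${_account} is selected" + + # check if we have the get-gcp-project function + if declare -F get-gcp-project > /dev/null; then + if ! get-gcp-project "${_account}"; then + common::err "get GCP project error" + return 1 + fi + fi + + # if we set CLUSTER_NAME then we will try to generate kubeconfig + if [[ -n "${_cluster_name}" ]]; then + common::debug "detect CLUSTER_NAME is set to ${_cluster_name}" + gcp-federation-k8s-cluster + return $? + else + # if not, we will just assume into the GCP account + if ! 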
gcp-federation-assume-role; then + common::err "gcp-federation-assume-role error" + return 1 + fi + fi + + return 0 + fi + # azure use case # the pattern is azure-72f677ccb9aa, the last section of Azure sub id if echo "${_account}" | grep -q "azure-"; then @@ -1064,8 +1151,12 @@ function init() { exit 1 fi + # pipeline outbound ip address + export PIPELINE_OUTBOUND_IP_ADDRESS=${PIPELINE_OUTBOUND_IP_ADDRESS:-"${TIBCO_PROVISIONER_OUTBOUND_IP_ADDRESS}"} # setup cloud account roles export PIPELINE_AWS_MANAGED_ACCOUNT_ROLE=${PIPELINE_AWS_MANAGED_ACCOUNT_ROLE:-"${TIBCO_AWS_CONTROLLED_ACCOUNT_ROLE}"} + # Cloud provider federation roles + export PIPELINE_GCP_FEDERATION_ROLE=${PIPELINE_GCP_FEDERATION_ROLE:-"${TIBCO_GCP_FEDERATION_ROLE}"} export PIPELINE_AZURE_FEDERATION_ROLE=${PIPELINE_AZURE_FEDERATION_ROLE:-"${TIBCO_AZURE_FEDERATION_ROLE}"} export PIPELINE_AWS_COGNITO_IDENTITY_POOL=${PIPELINE_AWS_COGNITO_IDENTITY_POOL:-"${TIBCO_AWS_COGNITO_IDENTITY_POOL}"} export PIPELINE_AWS_COGNITO_IDENTITY_POOL_LOGINS=${PIPELINE_AWS_COGNITO_IDENTITY_POOL_LOGINS:-"${TIBCO_AWS_COGNITO_IDENTITY_POOL_LOGINS}"} diff --git a/charts/helm-install/Chart.yaml b/charts/helm-install/Chart.yaml index a4d3f0b..8155385 100644 --- a/charts/helm-install/Chart.yaml +++ b/charts/helm-install/Chart.yaml @@ -6,7 +6,7 @@ apiVersion: v2 name: helm-install -version: "1.0.5" +version: "1.0.6" appVersion: "1.0.0" description: helm-install type: application diff --git a/charts/helm-install/scripts/_funcs_helm.sh b/charts/helm-install/scripts/_funcs_helm.sh index 8f4df53..6b12ed1 100644 --- a/charts/helm-install/scripts/_funcs_helm.sh +++ b/charts/helm-install/scripts/_funcs_helm.sh @@ -331,7 +331,7 @@ function process_chart_flags() { local _values_flag=${5} # start of helm command - echo -n "${HELM_COMMAND_LINE} " > "${_install_cmd_file}" + echo -n "${HELM_COMMAND_LINE} " >> "${_install_cmd_file}" local _chart_debug="" _chart_debug=$(echo "${_chart_flags_section}" | common::yq4-get '.debug') @@ -442,7 +442,7 @@ function installChart() { return 0 fi - local _install_cmd_file=chart-install-cmd.txt + local _install_cmd_file=${PIPELINE_HELM_INSTALL_CMD_FILE_NAME:-"chart-install-cmd.txt"} process_chart_flags "${_chart_name}" "${_chart_namespace}" "${_chart_flags_section}" "${_install_cmd_file}" "${_values_flag}" diff --git a/charts/provisioner-config-local/Chart.yaml b/charts/provisioner-config-local/Chart.yaml index 93c6288..94d1fb6 100644 --- a/charts/provisioner-config-local/Chart.yaml +++ b/charts/provisioner-config-local/Chart.yaml @@ -8,7 +8,7 @@ apiVersion: v2 name: provisioner-config-local description: Platform Provisioner local config type: application -version: 1.0.29 +version: 1.0.30 appVersion: "2.0.0" home: https://github.com/TIBCOSoftware/tp-helm-charts maintainers: diff --git a/charts/provisioner-config-local/config/pp-deploy-cp-core-on-prem.yaml b/charts/provisioner-config-local/config/pp-deploy-cp-core-on-prem.yaml index dddc2b4..d91046f 100644 --- a/charts/provisioner-config-local/config/pp-deploy-cp-core-on-prem.yaml +++ b/charts/provisioner-config-local/config/pp-deploy-cp-core-on-prem.yaml @@ -52,6 +52,11 @@ options: guiType: input reference: "meta.guiEnv.GUI_CP_CONTAINER_REGISTRY_PASSWORD" description: "The container registry password to deploy CP" +- name: "GUI_CP_CONTAINER_REGISTRY_REPOSITORY" + type: string + guiType: input + reference: "meta.guiEnv.GUI_CP_CONTAINER_REGISTRY_REPOSITORY" + description: "The container registry repository to deploy CP" - name: "GUI_TP_TLS_CERT" type: string guiType: input diff --git 
a/charts/provisioner-config-local/recipes/pp-deploy-cp-core-on-prem.yaml b/charts/provisioner-config-local/recipes/pp-deploy-cp-core-on-prem.yaml index 0ef6c41..bca2ec1 100644 --- a/charts/provisioner-config-local/recipes/pp-deploy-cp-core-on-prem.yaml +++ b/charts/provisioner-config-local/recipes/pp-deploy-cp-core-on-prem.yaml @@ -21,6 +21,7 @@ meta: GUI_CP_CONTAINER_REGISTRY: csgprduswrepoedge.jfrog.io GUI_CP_CONTAINER_REGISTRY_USERNAME: "" GUI_CP_CONTAINER_REGISTRY_PASSWORD: "" + GUI_CP_CONTAINER_REGISTRY_REPOSITORY: tibco-platform-docker-prod # TLS GUI_TP_TLS_CERT: "" GUI_TP_TLS_KEY: "" @@ -58,6 +59,7 @@ meta: CP_CONTAINER_REGISTRY: ${GUI_CP_CONTAINER_REGISTRY:-"csgprduswrepoedge.jfrog.io"} CP_CONTAINER_REGISTRY_USERNAME: "${GUI_CP_CONTAINER_REGISTRY_USERNAME}" CP_CONTAINER_REGISTRY_PASSWORD: "${GUI_CP_CONTAINER_REGISTRY_PASSWORD}" + CP_CONTAINER_REGISTRY_REPOSITORY: ${GUI_CP_CONTAINER_REGISTRY_REPOSITORY:-"tibco-platform-docker-prod"} # node CP_NODE_CIDR: 10.180.0.0/16 CP_POD_CIDR: 10.180.0.0/16 @@ -91,7 +93,7 @@ meta: TP_CERTIFICATE_CLUSTER_ISSUER: ${GUI_TP_CERTIFICATE_CLUSTER_ISSUER:-"tp-prod"} # the cluster issuer for tp-certificate # CP version CP_PLATFORM_BOOTSTRAP_VERSION: ${GUI_CP_PLATFORM_BOOTSTRAP_VERSION:-1.2.23} # 1.2 GA release. use ^1.0.0 for latest - CP_PLATFORM_BASE_VERSION: ${CP_PLATFORM_BASE_VERSION:-1.2.140} # 1.2 GA release. use ^1.0.0 for latest + CP_PLATFORM_BASE_VERSION: ${GUI_CP_PLATFORM_BASE_VERSION:-1.2.140} # 1.2 GA release. use ^1.0.0 for latest # flow control CP_CREATE_NAMESPACE: true CP_INSTALL_MAILDEV: true @@ -427,9 +429,10 @@ helmCharts: fluentbit: enabled: false containerRegistry: - password: "${CP_CONTAINER_REGISTRY_PASSWORD}" url: ${CP_CONTAINER_REGISTRY} + password: "${CP_CONTAINER_REGISTRY_PASSWORD}" username: "${CP_CONTAINER_REGISTRY_USERNAME}" + repository: "${CP_CONTAINER_REGISTRY_REPOSITORY}" controlPlaneInstanceId: ${CP_INSTANCE_ID} enableLogging: true serviceAccount: ${CP_INSTANCE_ID}-sa diff --git a/dev/platform-provisioner-install.sh b/dev/platform-provisioner-install.sh index e3ccb4c..0510e1d 100755 --- a/dev/platform-provisioner-install.sh +++ b/dev/platform-provisioner-install.sh @@ -50,6 +50,7 @@ if [[ ${PIPELINE_SKIP_PROVISIONER_UI} == "false" ]]; then exit 1 fi [[ -z "${PIPELINE_GUI_DOCKER_IMAGE_USERNAME}" ]] && export PIPELINE_GUI_DOCKER_IMAGE_USERNAME=${PIPELINE_GUI_DOCKER_IMAGE_USERNAME:-"AWS"} + [[ -z "${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL}" ]] && export PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL=${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL:-"${PIPELINE_GUI_DOCKER_IMAGE_REPO}/stratosphere/cic2-provisioner-webui"} fi # The tekton version to install @@ -151,7 +152,7 @@ fi # install provisioner web ui helm upgrade --install -n "${PIPELINE_NAMESPACE}" platform-provisioner-ui platform-provisioner-ui --repo "${PLATFORM_PROVISIONER_PIPELINE_REPO}" \ --version ^1.0.0 \ - --set image.repository="${PIPELINE_GUI_DOCKER_IMAGE_REPO}"/stratosphere/cic2-provisioner-webui \ + --set image.repository="${PIPELINE_GUI_DOCKER_IMAGE_REPO_FULL_URL}" \ --set image.tag=latest \ --set "imagePullSecrets[0].name=${_image_pull_secret_name}" \ --set guiConfig.onPremMode=true \ diff --git a/dev/platform-provisioner-pipelinerun.sh b/dev/platform-provisioner-pipelinerun.sh index 594a1dc..3fd4d92 100755 --- a/dev/platform-provisioner-pipelinerun.sh +++ b/dev/platform-provisioner-pipelinerun.sh @@ -98,7 +98,8 @@ keys='$account, $region, $pipeline_service_account_name $user_name, $random_numb recipe_replaced=$(envsubst "${keys}" <<< "${recipe_template}") echo "create 
tekton ${pipeline_name} pipelinerun ${account}-${random_number} for ${user_name}" #echo "${recipe_replaced}" -if ! kubectl apply -f <(echo "${recipe_replaced}"); then +# works for windows git bash as well +if ! echo "${recipe_replaced}" | kubectl apply -f -; then echo "kubectl apply error" exit 1 fi diff --git a/dev/platform-provisioner-test.sh b/dev/platform-provisioner-test.sh new file mode 100755 index 0000000..7cf698c --- /dev/null +++ b/dev/platform-provisioner-test.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# +# © 2024 Cloud Software Group, Inc. +# All Rights Reserved. Confidential & Proprietary. +# + +# the idea of this script is to provide the same runtime env as the tekton pipeline +####################################### +# platform-provisioner-test.sh runs a pipeline task locally in a docker container +# Globals: +# ACCOUNT: the aws account you want to assume to +# REGION: the cloud region +# AWS_PROFILE: the aws profile; we normally need to do an AWS sso login to update this profile +# GITHUB_TOKEN: the github token +# PIPELINE_PATH: the pipeline path +# PIPELINE_TRIGGER_RUN_SH: if true, will run the task directly; otherwise will just go to bash +# PIPELINE_INPUT_RECIPE: the input file name; default is recipe.yaml +# PIPELINE_MOCK: if true, will mock-run the pipeline (only run the meta part) +# PIPELINE_LOG_DEBUG: if true, will print the pipeline debug log +# PIPELINE_VALIDATE_INPUT: if true, will validate input against the cue schema +# PIPELINE_CHECK_DOCKER_STATUS: default true; set to false to skip the docker status check +# PIPELINE_INITIAL_ASSUME_ROLE: default true; set to false to skip the initial assume role to the target account +# PIPELINE_USE_LOCAL_CREDS: default false; set to true to use local creds +# PIPELINE_FUNCTION_INIT: default true; set to false to skip function init, which loads environment-specific functions and envs for the pipeline +# PIPELINE_AWS_MANAGED_ACCOUNT_ROLE: the role to assume to. We will use the current AWS role to assume to this role to perform the task. current role --> "arn:aws:iam::${_account}:role/${PIPELINE_AWS_MANAGED_ACCOUNT_ROLE}" +# Arguments: +# None +# Returns: +# 0 on success, non-zero on error +# Notes: +# This script needs to be run in the dev folder +# Recipe: The full path of the recipe file. The script will automatically load this recipe as input +# Docker run command: It will mount the necessary folders and pass environment variables to the container. +# It will also run the command defined in each pipeline task. +# It will also run the task if PIPELINE_TRIGGER_RUN_SH is set to true. +# Samples: +# export PIPELINE_INPUT_RECIPE="recipe.yaml" +# export ACCOUNT="on-prem" +# ./platform-provisioner-test.sh +####################################### +
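Building on the flags documented above, a quick way to smoke-test a recipe without touching any cloud account is a mock run (values here are illustrative):

```bash
# mock-run a recipe: only the meta part of the pipeline executes
export PIPELINE_INPUT_RECIPE="recipe.yaml"
export PIPELINE_MOCK="true"        # skip the real tasks
export PIPELINE_LOG_DEBUG="true"   # print pipeline debug logs
export ACCOUNT="on-prem"
./platform-provisioner-test.sh
```

+set +x + +[[ -z "${PIPELINE_DOCKER_IMAGE}" ]] && export PIPELINE_DOCKER_IMAGE=${PIPELINE_DOCKER_IMAGE:-"platform-provisioner:latest"} + +# setup dev path +DEV_PATH=`pwd` +cd .. || exit +[[ -z "${PIPELINE_PATH}" ]] && PIPELINE_PATH=`pwd` +cd "${DEV_PATH}" || exit + +# we need to set REGION, otherwise the INPUT will not be loaded +[[ -z "${REGION}" ]] && export REGION="us-west-2" + +[[ -z "${PIPELINE_INPUT_RECIPE}" ]] && export PIPELINE_INPUT_RECIPE="recipe.yaml" +[[ -z "${PIPELINE_NAME}" ]] && export PIPELINE_NAME=$(yq ".kind | select(. 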
!= null)" "${PIPELINE_INPUT_RECIPE}") +[[ -z "${PIPELINE_TRIGGER_RUN_SH}" ]] && export PIPELINE_TRIGGER_RUN_SH="true" +[[ -z "${PIPELINE_LOG_DEBUG}" ]] && export PIPELINE_LOG_DEBUG="false" +# For local tests, we enable this flag by default +[[ -z "${PIPELINE_USE_LOCAL_CREDS}" ]] && export PIPELINE_USE_LOCAL_CREDS="true" +# For this script, we need to skip the docker status check. Docker compose should set this to true +[[ -z "${PIPELINE_MOCK}" ]] && export PIPELINE_MOCK="false" +[[ -z "${PIPELINE_CHECK_DOCKER_STATUS}" ]] && export PIPELINE_CHECK_DOCKER_STATUS="false" +# we don't want to do the initial assume role for local runs +[[ -z "${PIPELINE_INITIAL_ASSUME_ROLE}" ]] && export PIPELINE_INITIAL_ASSUME_ROLE="false" +[[ -z "${PIPELINE_VALIDATE_INPUT}" ]] && export PIPELINE_VALIDATE_INPUT="true" + +# This case is to use the default kubeconfig file. The default kubeconfig file is ~/.kube/config. +# We will mount this file to the container and rename it to config-on-prem to avoid conflicts with the container kubeconfig file +[[ -z "${PIPELINE_ON_PREM_KUBECONFIG}" ]] && export PIPELINE_ON_PREM_KUBECONFIG="false" + +# this case is used for on-prem clusters; the user specifies the kubeconfig file name +if [[ -z "${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME}" ]]; then + export DOCKER_MOUNT_KUBECONFIG_FILE_NAME="target=/tmp1" +else + export DOCKER_MOUNT_KUBECONFIG_FILE_NAME="type=bind,source=${HOME}/.kube/${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME},target=/root/.kube/${PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME}" +fi +
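For the on-prem kubeconfig case just above, a minimal usage sketch (the file name is an assumption for illustration; the file must live under ${HOME}/.kube):

```bash
# mount a dedicated kubeconfig into the container instead of the default ~/.kube/config
export PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME="config-my-on-prem.yaml"  # hypothetical file under ~/.kube
export PIPELINE_INPUT_RECIPE="recipe.yaml"
export ACCOUNT="on-prem"
./platform-provisioner-test.sh
```

+# this is used for k8s on docker for mac +if [[ "${PIPELINE_ON_PREM_DOCKER_FOR_MAC}" == "true" ]]; then + DOCKER_FOR_MAC_NODE_IP=$(kubectl get nodes -o yaml | yq '.items[].status.addresses[] | select(.type == "InternalIP") | .address') + export DOCKER_FOR_MAC_ADD_HOST="--add-host=kubernetes.docker.internal:${DOCKER_FOR_MAC_NODE_IP}" +else + export DOCKER_FOR_MAC_ADD_HOST="--add-host=kubernetes.docker.internal:127.0.0.1" +fi + +# will only pass the content of the recipe file to the container +export PIPLINE_INPUT_RECIPE_CONTENT="" +[[ -f "${PIPELINE_INPUT_RECIPE}" ]] && PIPLINE_INPUT_RECIPE_CONTENT=$(cat ${PIPELINE_INPUT_RECIPE}) + +echo "Working with pipeline: ${PIPELINE_PATH}/charts/${PIPELINE_NAME}" + +echo "Using docker image: ${PIPELINE_DOCKER_IMAGE}" + +# set -a below exports functions so the subshell can use them +docker run -it --rm \ --name provisioner-pipeline-task \ --net host \ -e ACCOUNT \ -e REGION \ -e AWS_PROFILE \ -e AWS_ACCESS_KEY_ID \ -e AWS_SECRET_ACCESS_KEY \ -e AWS_SESSION_TOKEN \ -e GITHUB_TOKEN \ -e PIPELINE_TRIGGER_RUN_SH \ -e PIPELINE_INPUT_RECIPE \ -e PIPLINE_INPUT_RECIPE_CONTENT \ -e PIPELINE_MOCK \ -e PIPELINE_LOG_DEBUG \ -e PIPELINE_CHECK_DOCKER_STATUS \ -e PIPELINE_INITIAL_ASSUME_ROLE \ -e PIPELINE_USE_LOCAL_CREDS \ -e PIPELINE_FUNCTION_INIT \ -e PIPELINE_VALIDATE_INPUT \ -e PIPELINE_ON_PREM_KUBECONFIG \ -e PIPELINE_ON_PREM_KUBECONFIG_FILE_NAME \ -e PIPELINE_AWS_MANAGED_ACCOUNT_ROLE \ -e PIPELINE_NAME \ -v `pwd`:/tmp/dev \ -v "${HOME}"/.aws:/root/.aws \ -v "${HOME}"/.azure:/root/.azure \ -v "${HOME}"/.config/gcloud:/root/.config/gcloud \ -v "${HOME}"/.kube/config:/root/.kube/config-on-prem \ --mount "${DOCKER_MOUNT_KUBECONFIG_FILE_NAME}" \ "${DOCKER_FOR_MAC_ADD_HOST}" \ -v "${HOME}"/.docker:/root/.docker -v /var/run/docker.sock:/var/run/docker.sock \ -v "${PIPELINE_PATH}"/charts:/tmp/charts \ "${PIPELINE_DOCKER_IMAGE}" bash -c 'export REGION=${REGION:-"us-west-2"} \ && declare -xr WORKING_PATH=/workspace \ && declare -xr 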
SCRIPTS=${WORKING_PATH}/task-scripts \ + && declare -xr INPUT="${PIPLINE_INPUT_RECIPE_CONTENT}" \ + && [[ -z ${PIPELINE_NAME} ]] && export PIPELINE_NAME=$(echo "${PIPLINE_INPUT_RECIPE_CONTENT}" | yq4 ".kind | select(. != null)" ) \ + && echo "using pipeline: ${PIPELINE_NAME}" \ + && [[ -z ${PIPELINE_NAME} ]] && { echo "PIPELINE_NAME can not be empty"; exit 1; } || true \ + && mkdir -p "${SCRIPTS}" \ + && cp -LR /tmp/charts/common-dependency/scripts/* "${SCRIPTS}" \ + && cp -LR /tmp/charts/${PIPELINE_NAME}/scripts/* "${SCRIPTS}" \ + && chmod +x "${SCRIPTS}"/*.sh \ + && cd "${SCRIPTS}" \ + && set -a && . _functions.sh && set +a \ + && [[ -z ${ACCOUNT} ]] && { echo "ACCOUNT can not be empty"; exit 1; } || true \ + && [[ "${PIPELINE_TRIGGER_RUN_SH}" == "true" ]] && ./run.sh ${ACCOUNT} ${REGION} "${INPUT}" || bash' diff --git a/dev/platform-provisioner.sh b/dev/platform-provisioner.sh index 9f7e198..5c954b4 100755 --- a/dev/platform-provisioner.sh +++ b/dev/platform-provisioner.sh @@ -92,7 +92,7 @@ echo "Using platform provisioner docker image: ${PIPELINE_DOCKER_IMAGE}" # is used to export functions; so subshell can use it docker run -it --rm \ --name provisioner-pipeline-task \ - --net ${PIPELINE_CONTAINER_NETWORK} \ + --net "${PIPELINE_CONTAINER_NETWORK}" \ -e ACCOUNT \ -e REGION \ -e AWS_PROFILE \ @@ -117,6 +117,7 @@ docker run -it --rm \ -e PIPELINE_NAME \ -v "${HOME}"/.aws:/root/.aws \ -v "${HOME}"/.azure:/root/.azure \ + -v "${HOME}"/.config/gcloud:/root/.config/gcloud \ -v "${HOME}"/.kube/config:/root/.kube/config-on-prem \ --mount "${DOCKER_MOUNT_KUBECONFIG_FILE_NAME}" \ "${DOCKER_FOR_MAC_ADD_HOST}" \ diff --git a/docker/Dockerfile b/docker/Dockerfile index 9f4a399..748e5a0 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -8,9 +8,9 @@ # https://docs.aws.amazon.com/cli/latest/userguide/getting-started-source-install.html#source-getting-started-install-workflows-alpine # release notes: https://raw.githubusercontent.com/aws/aws-cli/v2/CHANGELOG.rst -FROM python:3.11.7-alpine3.19 as builder-aws +FROM python:3.11-alpine3.19 AS builder-aws ARG AWS_CLI_VERSION -ENV AWS_CLI_VERSION=${AWS_CLI_VERSION:-"2.15.48"} +ENV AWS_CLI_VERSION=${AWS_CLI_VERSION:-"2.17.27"} # Install build dependencies RUN apk add --no-cache \ @@ -33,9 +33,9 @@ RUN apk add --no-cache \ # https://github.com/Azure/azure-cli/issues/19591 # https://github.com/Azure/azure-cli/releases -FROM python:3.11.7-alpine3.19 as builder-azure +FROM python:3.11-alpine3.19 AS builder-azure ARG AZURE_CLI_VERSION -ENV AZURE_CLI_VERSION=${AZURE_CLI_VERSION:-"2.60.0"} +ENV AZURE_CLI_VERSION=${AZURE_CLI_VERSION:-"2.63.0"} WORKDIR /azure RUN apk add --no-cache --update python3 py3-pip && \ apk add --no-cache --update --virtual=build gcc musl-dev python3-dev libffi-dev openssl-dev cargo make && \ @@ -61,34 +61,43 @@ RUN apk add --no-cache --upgrade coreutils util-linux gettext bash curl wget env # COPY AWSCLI V2 to BASE_IMAGE COPY --from=builder-aws /opt/aws-cli/ /opt/aws-cli/ -RUN ln -s /opt/aws-cli/bin/aws /usr/local/bin/aws +RUN ln -s /opt/aws-cli/bin/aws /usr/local/bin/aws && \ + aws --version # COPY azure cli to BASE_IMAGE COPY --from=builder-azure /azure /azure -RUN ln -s /usr/bin/python3 /usr/local/bin/python && ln -s /azure/.venv/bin/az /usr/local/bin/az +RUN ln -s /usr/bin/python3 /usr/local/bin/python && ln -s /azure/.venv/bin/az /usr/local/bin/az && \ + az version + +# gcloud cli https://cloud.google.com/sdk/docs/release-notes +ARG CLOUD_SDK_VERSION +ENV CLOUD_SDK_VERSION=${CLOUD_SDK_VERSION:-"487.0.0"} +ENV 
PATH=/google-cloud-sdk/bin:$PATH +RUN if [ `uname -m` = 'x86_64' ]; then echo -n "x86_64" > /tmp/arch; else echo -n "arm" > /tmp/arch; fi; +RUN ARCH=`cat /tmp/arch` && \ + curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-${CLOUD_SDK_VERSION}-linux-${ARCH}.tar.gz && \ + tar xzf google-cloud-cli-${CLOUD_SDK_VERSION}-linux-${ARCH}.tar.gz && \ + rm google-cloud-cli-${CLOUD_SDK_VERSION}-linux-${ARCH}.tar.gz && \ + export CLOUDSDK_CORE_DISABLE_PROMPTS=1 && \ + gcloud components install beta gke-gcloud-auth-plugin && \ + gcloud config set core/disable_usage_reporting true && \ + gcloud config set component_manager/disable_update_check true && \ + gcloud config set metrics/environment docker_image_alpine && \ + gcloud --version # eksctl https://github.com/eksctl-io/eksctl/releases ARG EKSCTL_VERSION -ENV EKSCTL_VERSION=${EKSCTL_VERSION:-"0.176"} +ENV EKSCTL_VERSION=${EKSCTL_VERSION:-"v0.188.0"} RUN CMD_NAME=eksctl && \ - curl --silent --location "https://github.com/eksctl-io/eksctl/releases/download/v${EKSCTL_VERSION}.0/${CMD_NAME}_$(uname -s)_${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ + curl --silent --location "https://github.com/eksctl-io/eksctl/releases/download/${EKSCTL_VERSION}/${CMD_NAME}_$(uname -s)_${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ chmod +x /tmp/${CMD_NAME} && mv /tmp/${CMD_NAME} /usr/local/bin/${CMD_NAME}-${EKSCTL_VERSION} && \ ln -sf /usr/local/bin/${CMD_NAME}-${EKSCTL_VERSION} /usr/local/bin/${CMD_NAME} && \ ${CMD_NAME} version -# calicoctl https://github.com/projectcalico/calico/releases -ARG CALICOCTL_VERSION -ENV CALICOCTL_VERSION=${CALICOCTL_VERSION:-"v3.28.0"} -RUN CMD_NAME=calicoctl && \ - curl -sLO https://github.com/projectcalico/calico/releases/download/${CALICOCTL_VERSION}/calicoctl-${TARGETOS}-${TARGETARCH} && \ - chmod +x ./${CMD_NAME}-${TARGETOS}-${TARGETARCH} && mv -f ./${CMD_NAME}-${TARGETOS}-${TARGETARCH} /usr/local/bin/${CMD_NAME}-3.23 && \ - ln -sf /usr/local/bin/${CMD_NAME}-3.23 /usr/local/bin/${CMD_NAME} && \ - { CALICO_DATASTORE_TYPE=${CALICO_DATASTORE_TYPE} ${CMD_NAME} version 2>/dev/null || echo -n ""; } - # aws ecr credentials helper # https://github.com/awslabs/amazon-ecr-credential-helper/releases ARG ECR_LOGIN_VERSION -ENV ECR_LOGIN_VERSION=${ECR_LOGIN_VERSION:-"0.7.1"} +ENV ECR_LOGIN_VERSION=${ECR_LOGIN_VERSION:-"0.8.0"} RUN echo "/${ECR_LOGIN_VERSION}/${TARGETARCH}/docker-credential-ecr-login" && \ CMD_NAME=docker-credential-ecr-login && \ curl --silent --location "https://amazon-ecr-credential-helper-releases.s3.us-east-2.amazonaws.com/${ECR_LOGIN_VERSION}/${TARGETOS}-${TARGETARCH}/docker-credential-ecr-login" -o /tmp/${CMD_NAME} && \ @@ -98,16 +107,16 @@ RUN echo "/${ECR_LOGIN_VERSION}/${TARGETARCH}/docker-credential-ecr-login" && \ # cue for yaml validation https://github.com/cue-lang/cue/releases ARG CUE_VERSION -ENV CUE_VERSION=${CUE_VERSION:-"0.8"} +ENV CUE_VERSION=${CUE_VERSION:-"v0.9.2"} RUN CMD_NAME=cue && \ - curl --silent --location "https://github.com/cue-lang/cue/releases/download/v${CUE_VERSION}.0/${CMD_NAME}_v${CUE_VERSION}.0_${TARGETOS}_${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ + curl --silent --location "https://github.com/cue-lang/cue/releases/download/${CUE_VERSION}/${CMD_NAME}_${CUE_VERSION}_${TARGETOS}_${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ chmod +x /tmp/${CMD_NAME} && mv /tmp/${CMD_NAME} /usr/local/bin/${CMD_NAME}-${CUE_VERSION} && \ ln -sf /usr/local/bin/${CMD_NAME}-${CUE_VERSION} /usr/local/bin/${CMD_NAME} && \ ${CMD_NAME} version # Azure CLI Bicep 
https://github.com/Azure/bicep/releases ARG AZURE_CLI_BICEP_VERSION -ENV AZURE_CLI_BICEP_VERSION=${AZURE_CLI_BICEP_VERSION:-"v0.27.1"} +ENV AZURE_CLI_BICEP_VERSION=${AZURE_CLI_BICEP_VERSION:-"v0.29.47"} RUN echo "AZURE_CLI_BICEP_VERSION=${AZURE_CLI_BICEP_VERSION}" && \ az --version && \ az bicep install --version ${AZURE_CLI_BICEP_VERSION} && \ @@ -119,34 +128,42 @@ RUN ln -sf /usr/bin/yq /usr/local/bin/yq4 # The following are for recipe meta.tools compatibility # yq 4 utility https://github.com/mikefarah/yq/releases -ENV YQ_VERSION_440=${YQ_VERSION_440:-"v4.44.1"} +ARG YQ_VERSION_440 +ENV YQ_VERSION_440=${YQ_VERSION_440:-"v4.44.3"} RUN CMD_NAME=yq && \ curl -sSLo /usr/local/bin/${CMD_NAME}-4.40 "https://github.com/mikefarah/${CMD_NAME}/releases/download/${YQ_VERSION_440}/${CMD_NAME}_${TARGETOS}_${TARGETARCH}" && \ chmod +x /usr/local/bin/${CMD_NAME}-4.40 -# helm https://github.com/helm/helm/releases -ENV HELM_VERSION_313=${HELM_VERSION_313:-"v3.13.3"} -RUN CMD_NAME=helm && \ - curl --silent --location "https://get.helm.sh/${CMD_NAME}-${HELM_VERSION_313}-${TARGETOS}-${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ - chmod +x /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} && mv -f /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} /usr/local/bin/${CMD_NAME}-3.13 +# vcluster for vcluster https://github.com/loft-sh/vcluster/releases +ARG VCLUSTER_VERSION=v0.20.0 +RUN export CMD_NAME=vcluster && \ + curl --silent --location -o /tmp/${CMD_NAME} "https://github.com/loft-sh/vcluster/releases/download/${VCLUSTER_VERSION}/${CMD_NAME}-${TARGETOS}-${TARGETARCH}" && \ + chmod +x /tmp/${CMD_NAME} && mv /tmp/${CMD_NAME} /usr/local/bin/${CMD_NAME}-${VCLUSTER_VERSION} && \ + ln -sf /usr/local/bin/${CMD_NAME}-${VCLUSTER_VERSION} /usr/local/bin/${CMD_NAME} && \ + ${CMD_NAME} version +# helm https://github.com/helm/helm/releases +ARG HELM_VERSION_314 ENV HELM_VERSION_314=${HELM_VERSION_314:-"v3.14.4"} RUN CMD_NAME=helm && \ curl --silent --location "https://get.helm.sh/${CMD_NAME}-${HELM_VERSION_314}-${TARGETOS}-${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ chmod +x /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} && mv -f /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} /usr/local/bin/${CMD_NAME}-3.14 -# kubectl https://kubernetes.io/releases/ -ENV KUBECTL_VERSION_128=${KUBECTL_VERSION_128:-"v1.28.7"} -RUN CMD_NAME=kubectl && \ - curl -sLO https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION_128}/bin/${TARGETOS}/${TARGETARCH}/${CMD_NAME} && \ - chmod +x ./${CMD_NAME} && mv -f ./${CMD_NAME} /usr/local/bin/${CMD_NAME}-1.28 +ARG HELM_VERSION_315 +ENV HELM_VERSION_315=${HELM_VERSION_315:-"v3.15.3"} +RUN CMD_NAME=helm && \ + curl --silent --location "https://get.helm.sh/${CMD_NAME}-${HELM_VERSION_315}-${TARGETOS}-${TARGETARCH}.tar.gz" | tar xz -C /tmp && \ + chmod +x /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} && mv -f /tmp/${TARGETOS}-${TARGETARCH}/${CMD_NAME} /usr/local/bin/${CMD_NAME}-3.15 -ENV KUBECTL_VERSION_129=${KUBECTL_VERSION_129:-"v1.29.4"} +# kubectl https://kubernetes.io/releases/ +ARG KUBECTL_VERSION_129 +ENV KUBECTL_VERSION_129=${KUBECTL_VERSION_129:-"v1.29.7"} RUN CMD_NAME=kubectl && \ curl -sLO https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION_129}/bin/${TARGETOS}/${TARGETARCH}/${CMD_NAME} && \ chmod +x ./${CMD_NAME} && mv -f ./${CMD_NAME} /usr/local/bin/${CMD_NAME}-1.29 -ENV KUBECTL_VERSION_130=${KUBECTL_VERSION_130:-"v1.30.0"} +ARG KUBECTL_VERSION_130 +ENV KUBECTL_VERSION_130=${KUBECTL_VERSION_130:-"v1.30.3"} RUN CMD_NAME=kubectl && \ curl -sLO 
https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION_130}/bin/${TARGETOS}/${TARGETARCH}/${CMD_NAME} && \ chmod +x ./${CMD_NAME} && mv -f ./${CMD_NAME} /usr/local/bin/${CMD_NAME}-1.30 diff --git a/docker/build.sh b/docker/build.sh index e86df0f..1b8ac11 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -9,6 +9,7 @@ # build.sh this will docker image for platform-provisioner # Globals: # IMAGE_TAG: tag to use for the image. If not specified, latest will be used +# IMAGE_NAME: name of the image to build # DOCKER_REGISTRY: docker registry to push the image to. If not specified, image will be built locally # PUSH_DOCKER_IMAGE: if true, image will be pushed to DOCKER_REGISTRY # PLATFORM: platform to build the image for. If not specified, linux/amd64,linux/arm64 will be used for multiarch build, linux/amd64 for local build @@ -20,7 +21,9 @@ # Notes: # None # Samples: -# ./build.sh +# Case 1: ./build.sh # this will build image for local platform with ${IMAGE_NAME}:latest +# Case 2: DOCKER_REGISTRY= IMAGE_TAG=v1 ./build.sh # this will build image with tag v1 eg: /${IMAGE_NAME}:v1 +# Case 3: DOCKER_REGISTRY= PUSH_DOCKER_IMAGE=true ./build.sh # this will build and push image to the registry ####################################### # build-push-multiarch build and push multiarch image
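To make the three cases above concrete, here is a hypothetical invocation of Case 3 (the registry name is illustrative only), together with the buildx call it roughly amounts to, assuming build-push-multiarch wraps docker buildx:

```bash
# illustrative registry; builds and pushes a multiarch image
DOCKER_REGISTRY=registry.example.com PUSH_DOCKER_IMAGE=true IMAGE_TAG=v1 ./build.sh
# roughly equivalent to (an assumption based on the function name):
# docker buildx build --platform linux/amd64,linux/arm64 \
#   -t registry.example.com/platform-provisioner:v1 --push .
```

@@ -75,21 +78,22 @@ function main() { + IMAGE_NAME=${IMAGE_NAME:-"platform-provisioner"} IMAGE_TAG=${IMAGE_TAG:-"latest"} - IMAGE_NAME="platform-provisioner:${IMAGE_TAG}" - DOCKERFILE="Dockerfile" + _image_and_tag="${IMAGE_NAME}:${IMAGE_TAG}" + DOCKERFILE=${DOCKERFILE:-"Dockerfile"} BUILD_ARGS=${BUILD_ARGS:-"--build-arg AWS_CLI_VERSION=${AWS_CLI_VERSION} --build-arg EKSCTL_VERSION=${EKSCTL_VERSION}"} if [[ "${DOCKER_REGISTRY}" != "" ]]; then - IMAGE_NAME="${DOCKER_REGISTRY}/${IMAGE_NAME}" + _image_and_tag="${DOCKER_REGISTRY}/${_image_and_tag}" fi if [[ "${PUSH_DOCKER_IMAGE}" == "true" ]] && [[ "${DOCKER_REGISTRY}" != "" ]]; then - # more infor about platform flag: https://docs.docker.com/engine/reference/commandline/buildx_build/#platform + # more info about platform flag: https://docs.docker.com/engine/reference/commandline/buildx_build/#platform PLATFORM=${PLATFORM:-"linux/amd64,linux/arm64"} - echo "Building and pushing to ${IMAGE_NAME}" - if ! build-push-multiarch "${PLATFORM}" "${IMAGE_NAME}" "${DOCKERFILE}" "${BUILD_ARGS}"; then - echo "Failed to build and push image ${IMAGE_NAME}" + echo "Building and pushing to ${_image_and_tag}" + if ! build-push-multiarch "${PLATFORM}" "${_image_and_tag}" "${DOCKERFILE}" "${BUILD_ARGS}"; then + echo "Failed to build and push image ${_image_and_tag}" return 1 fi else @@ -103,8 +107,8 @@ function main() { fi fi echo "Building locally for ${PLATFORM}" - if ! build-local "${PLATFORM}" "${IMAGE_NAME}" "${DOCKERFILE}" "${BUILD_ARGS}"; then - echo "Failed to build image ${IMAGE_NAME}" + if ! 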
build-local "${PLATFORM}" "${_image_and_tag}" "${DOCKERFILE}" "${BUILD_ARGS}"; then + echo "Failed to build image ${_image_and_tag}" return 1 fi fi diff --git a/docs/recipes/controlplane/tp-cp.yaml b/docs/recipes/controlplane/tp-cp.yaml index bfcda98..0170dfd 100644 --- a/docs/recipes/controlplane/tp-cp.yaml +++ b/docs/recipes/controlplane/tp-cp.yaml @@ -36,8 +36,8 @@ meta: CP_CONTAINER_REGISTRY_USERNAME: "${GUI_CP_CONTAINER_REGISTRY_USERNAME}" CP_CONTAINER_REGISTRY_PASSWORD: "${GUI_CP_CONTAINER_REGISTRY_PASSWORD}" # node - CP_NODE_CIDR: 10.180.0.0/16 - CP_POD_CIDR: 10.180.0.0/16 + CP_NODE_CIDR: ${GUI_TP_CLUSTER_CIDR:-"10.180.0.0/16"} + CP_POD_CIDR: ${GUI_TP_CLUSTER_CIDR:-"10.180.0.0/16"} CP_ADMIN_EMAIL: ${GUI_CP_ADMIN_EMAIL:-"cp-test@gmail.com"} # third party CP_EXT_NAMESPACE: tibco-ext @@ -78,8 +78,6 @@ meta: CP_INSTALL_PLATFORM_BASE: true tools: yq: "4.40" - helm: "3.13" - kubectl: "1.28" preTasks: - condition: ${CP_CREATE_NAMESPACE} clusters: diff --git a/docs/recipes/k8s/cloud/aks.md b/docs/recipes/k8s/cloud/aks.md index c2c212c..877f318 100644 --- a/docs/recipes/k8s/cloud/aks.md +++ b/docs/recipes/k8s/cloud/aks.md @@ -4,7 +4,7 @@ Under the project root; run the following command to test the pipeline and Azure role. Use your own Azure account. You need to make sure that you have log in to your Azure account. The platform provisioner script will create a docker container to run the pipeline scripts with the given recipe. -It will mount `.azure` folder to the docker container to access the Azure config. +It will mount `"${HOME}"/.azure` folder to the docker container to access the Azure config. ```bash export PIPELINE_INPUT_RECIPE="docs/recipes/tests/test-azure.yaml" diff --git a/docs/recipes/k8s/cloud/deploy-tp-aks.yaml b/docs/recipes/k8s/cloud/deploy-tp-aks.yaml index 86024e4..eb05991 100644 --- a/docs/recipes/k8s/cloud/deploy-tp-aks.yaml +++ b/docs/recipes/k8s/cloud/deploy-tp-aks.yaml @@ -16,6 +16,7 @@ meta: # github GITHUB_TOKEN: ${GUI_GITHUB_TOKEN} TP_CHART_REPO: https://tibcosoftware.github.io/tp-helm-charts + PLATFORM_PROVISIONER_REPO: github.com/TIBCOSoftware/platform-provisioner # cluster TP_CLUSTER_NAME: ${GUI_TP_CLUSTER_NAME} TP_CLUSTER_VERSION: ${GUI_TP_CLUSTER_VERSION:-1.29} @@ -75,7 +76,7 @@ preTasks: repo: git: github: - repo: github.com/TIBCOSoftware/platform-provisioner + repo: ${PLATFORM_PROVISIONER_REPO} path: docs/recipes/k8s/cloud/scripts/aks branch: ${TP_SCRIPT_BRANCH} script: @@ -85,7 +86,7 @@ preTasks: repo: git: github: - repo: github.com/TIBCOSoftware/platform-provisioner + repo: ${PLATFORM_PROVISIONER_REPO} path: docs/recipes/k8s/cloud/scripts/aks branch: ${TP_SCRIPT_BRANCH} script: @@ -105,7 +106,7 @@ preTasks: repo: git: github: - repo: github.com/TIBCOSoftware/platform-provisioner + repo: ${PLATFORM_PROVISIONER_REPO} path: docs/recipes/k8s/cloud/scripts/aks branch: ${TP_SCRIPT_BRANCH} script: diff --git a/docs/recipes/k8s/cloud/deploy-tp-eks.yaml b/docs/recipes/k8s/cloud/deploy-tp-eks.yaml index dea9f25..bcf54f2 100644 --- a/docs/recipes/k8s/cloud/deploy-tp-eks.yaml +++ b/docs/recipes/k8s/cloud/deploy-tp-eks.yaml @@ -16,6 +16,7 @@ meta: # github GITHUB_TOKEN: ${GUI_GITHUB_TOKEN} TP_CHART_REPO: https://tibcosoftware.github.io/tp-helm-charts + PLATFORM_PROVISIONER_REPO: github.com/TIBCOSoftware/platform-provisioner # cluster TP_CLUSTER_NAME: ${GUI_TP_CLUSTER_NAME} TP_CLUSTER_VERSION: ${GUI_TP_CLUSTER_VERSION:-1.29} @@ -49,7 +50,6 @@ meta: TP_SCRIPT_NAME_SH_EKS: create-eks.sh # the script that DP will run TP_SCRIPT_NAME_SH_EFS: create-efs.sh # the script that 
DP will run TP_INSTALL_POSTGRES: ${GUI_TP_INSTALL_POSTGRES:-true} -TP_ENABLE_O11Y_DAEMONSET: ${GUI_TP_ENABLE_O11Y_DAEMONSET:-true} # Do not change, variables to configure nginx/kong related resources based on ingressclass passed TP_ENABLE_NGINX: $([[ "$TP_INGRESS_CLASS" == "nginx" ]] && echo "true" || echo "false") TP_ENABLE_SECONDARY_KONG: $([[ "$TP_SECONDARY_INGRESS_CLASS" == "kong" ]] && echo "true" || echo "false") @@ -66,7 +66,7 @@ preTasks: repo: git: github: - repo: github.com/TIBCOSoftware/platform-provisioner + repo: ${PLATFORM_PROVISIONER_REPO} path: docs/recipes/k8s/cloud/scripts/eks branch: ${TP_SCRIPT_BRANCH} script: @@ -78,7 +78,7 @@ preTasks: repo: git: github: - repo: github.com/TIBCOSoftware/platform-provisioner + repo: ${PLATFORM_PROVISIONER_REPO} path: docs/recipes/k8s/cloud/scripts/eks branch: ${TP_SCRIPT_BRANCH} script: diff --git a/docs/recipes/k8s/cloud/deploy-tp-gke.yaml b/docs/recipes/k8s/cloud/deploy-tp-gke.yaml new file mode 100644 index 0000000..ed6a91d --- /dev/null +++ b/docs/recipes/k8s/cloud/deploy-tp-gke.yaml @@ -0,0 +1,380 @@ +# +# Copyright © 2024. Cloud Software Group, Inc. +# This file is subject to the license terms contained +# in the license file that is distributed with this file. +# + +# Recipe for installing TIBCO Platform GKE +apiVersion: v1 +kind: helm-install +meta: + globalEnvVariable: + # pipeline env + REPLACE_RECIPE: true + PIPELINE_LOG_DEBUG: false + PIPELINE_CHECK_DOCKER_STATUS: false + # github + GITHUB_TOKEN: ${GUI_GITHUB_TOKEN} + TP_CHART_REPO: https://tibcosoftware.github.io/tp-helm-charts + PLATFORM_PROVISIONER_REPO: github.com/TIBCOSoftware/platform-provisioner + # cluster + TP_CLUSTER_NAME: ${GUI_TP_CLUSTER_NAME:-tp-cluster} + TP_CLUSTER_VERSION: ${GUI_TP_CLUSTER_VERSION:-"1.29.7-gke.1008000"} + TP_CLUSTER_REGION: ${GUI_TP_CLUSTER_REGION:-"us-west1"} + TP_AUTHORIZED_IP: ${GUI_TP_AUTHORIZED_IP} # your ip x.x.x.x/32 + TP_CLUSTER_VPC_CIDR: ${GUI_TP_CLUSTER_VPC_CIDR:-"10.0.0.0/20"} + TP_CLUSTER_CIDR: ${GUI_TP_CLUSTER_CIDR:-"10.1.0.0/16"} + TP_CLUSTER_SERVICE_CIDR: ${GUI_TP_CLUSTER_SERVICE_CIDR:-"10.2.0.0/20"} + # GCP settings + GCP_PROJECT_ID: ${GUI_GCP_PROJECT_ID} # GCP project is required + GCP_REGION: ${TP_CLUSTER_REGION:-us-west1} # GCP region is required to connect to GKE + GCP_SA_CERT_MANAGER_NAME: tp-cert-manager-sa # GCP service account name for cert-manager + GCP_SA_EXTERNAL_DNS_NAME: tp-external-dns-sa # GCP service account name for external-dns + # domain + TP_TOP_LEVEL_DOMAIN: ${GUI_TP_TOP_LEVEL_DOMAIN} # the top level domain for the main ingress + TP_SANDBOX: ${GUI_TP_SANDBOX} # the sandbox for the main ingress + TP_MAIN_INGRESS_SANDBOX_SUBDOMAIN: ${GUI_TP_MAIN_INGRESS_SANDBOX_SUBDOMAIN} + TP_DOMAIN: ${TP_MAIN_INGRESS_SANDBOX_SUBDOMAIN}.${TP_SANDBOX}.${TP_TOP_LEVEL_DOMAIN} # the actual domain for the TIBCO platform. Sample format: <subdomain>.${SANDBOX}.${TP_TOP_LEVEL_DOMAIN}
+ # ingress + TP_INSTALL_NGINX_INGRESS: ${GUI_TP_INSTALL_NGINX_INGRESS:-"true"} + TP_INGRESS_CLASS: ${GUI_TP_INGRESS_CLASS:-"nginx"} + TP_INGRESS_NAMESPACE: ingress-system + TP_INGRESS_SERVICE_TYPE: LoadBalancer # NodePort for kind, LoadBalancer for others + TP_CERTIFICATE_CLUSTER_ISSUER: ${GUI_TP_CERTIFICATE_CLUSTER_ISSUER:-"tp-prod"} # the cluster issuer for tp-certificate + TP_ES_RELEASE_NAME: dp-config-es + # storage + TP_STORAGE_CLASS: ${GUI_TP_STORAGE_CLASS:-"standard-rwx-tp"} + # flow control + TP_INSTALL_K8S: ${GUI_TP_INSTALL_K8S:-true} # change to true to install k8s + TP_SCRIPT_BRANCH: main + TP_SCRIPT_NAME_SH_GCP: create-gke.sh + TP_SKIP_GENERATE_CLUSTER_ISSUER: false + TP_INSTALL_CERT_MANAGER: true + TP_INSTALL_EXTERNAL_DNS: true + TP_INSTALL_METRICS_SERVER: true + TP_INSTALL_POSTGRES: true + TP_INSTALL_O11Y: ${GUI_TP_INSTALL_O11Y:-false} + TP_GENERATE_STORAGE_CLASS_GENERIC: true # storage class for GCP Filestore + tools: + yq: "4.40" +preTasks: +- condition: ${TP_INSTALL_K8S} + repo: + git: + github: + repo: ${PLATFORM_PROVISIONER_REPO} + path: docs/recipes/k8s/cloud/scripts/gke + branch: ${TP_SCRIPT_BRANCH} + script: + ignoreErrors: false + fileName: ${TP_SCRIPT_NAME_SH_GCP} +- condition: ${TP_GENERATE_STORAGE_CLASS_GENERIC} + clusters: + - name: ${TP_CLUSTER_NAME} + script: + ignoreErrors: false + fileName: script.sh + content: | + kubectl apply -f - <<EOF […] +Environment variables that need to be set in the recipe: +```yaml +meta: + globalEnvVariable: + # GCP settings + GCP_PROJECT_ID: "" + GCP_REGION: "" # GCP region + # container registry + CP_CONTAINER_REGISTRY: "" # use jFrog for CP production deployment + CP_CONTAINER_REGISTRY_USERNAME: "" + CP_CONTAINER_REGISTRY_PASSWORD: "" + + CP_CLUSTER_NAME: "" + CP_DNS_DOMAIN: "" + CP_STORAGE_CLASS: "standard-rwx-tp" # We create a new storage class for CP + CP_STORAGE_PV_SIZE: "1Ti" # minimum 1Ti + TP_CLUSTER_CIDR: "10.1.0.0/16" # match with deploy-tp-gke.yaml recipe + + CP_INGRESS_CLASSNAME: "nginx" +```
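The storage-class manifest that the preTask above pipes into kubectl (along with the rest of the GKE recipe and the header of the doc it precedes) was lost to extraction. As a hedged sketch of what such a manifest typically looks like (an assumption, not the original; the tier and network values are illustrative), a GKE Filestore CSI storage class matching the TP_STORAGE_CLASS default would be roughly:

```bash
# sketch only: a Filestore CSI storage class named after the TP_STORAGE_CLASS default
kubectl apply -f - <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard-rwx-tp
provisioner: filestore.csi.storage.gke.io
parameters:
  tier: standard               # assumption: Filestore tier
  network: ${TP_CLUSTER_NAME}  # assumption: the VPC created by create-gke.sh
allowVolumeExpansion: true
EOF
```

diff --git a/docs/recipes/k8s/cloud/scripts/gke/create-gke.sh b/docs/recipes/k8s/cloud/scripts/gke/create-gke.sh new file mode 100755 index 0000000..31da450 --- /dev/null +++ b/docs/recipes/k8s/cloud/scripts/gke/create-gke.sh @@ -0,0 +1,111 @@ +#!/bin/bash +# +# © 2024 Cloud Software Group, Inc. +# All Rights Reserved. Confidential & Proprietary. 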
+# + +if [ -z "${GCP_PROJECT_ID}" ]; then + echo "Please set GCP_PROJECT_ID environment variable" + exit 1 +fi + +if [ -z "${TP_CLUSTER_NAME}" ]; then + echo "Please set TP_CLUSTER_NAME environment variable" + exit 1 +fi + +# default values +export GCP_REGION=${TP_CLUSTER_REGION:-us-west1} +export TP_CLUSTER_VPC_CIDR=${TP_CLUSTER_VPC_CIDR:-"10.0.0.0/20"} +# the mask must be /21 or lower, otherwise GCP errors: Cluster CIDR range is greater than maximum (24 > 21) +export TP_CLUSTER_CIDR=${TP_CLUSTER_CIDR:-"10.1.0.0/16"} +export TP_CLUSTER_SERVICE_CIDR=${TP_CLUSTER_SERVICE_CIDR:-"10.2.0.0/20"} +export TP_CLUSTER_VERSION=${TP_CLUSTER_VERSION:-"1.29.7-gke.1008000"} +export TP_CLUSTER_INSTANCE_TYPE=${TP_CLUSTER_INSTANCE_TYPE:-"e2-standard-4"} +export TP_CLUSTER_DESIRED_CAPACITY=${TP_CLUSTER_DESIRED_CAPACITY:-"2"} + +# add your public ip +# PIPELINE_OUTBOUND_IP_ADDRESS is the outbound ip address of the pipeline engine +AUTHORIZED_IP="${TP_AUTHORIZED_IP:-${PIPELINE_OUTBOUND_IP_ADDRESS}}" +if [ -n "${PIPELINE_OUTBOUND_IP_ADDRESS}" ] && [ -n "${TP_AUTHORIZED_IP}" ]; then + AUTHORIZED_IP="${TP_AUTHORIZED_IP},${PIPELINE_OUTBOUND_IP_ADDRESS}" +fi + +echo "create vpc" +gcloud compute networks create "${TP_CLUSTER_NAME}" \ + --project="${GCP_PROJECT_ID}" \ + --description=TIBCO\ Platform\ VPC \ + --subnet-mode=custom \ + --mtu=1460 \ + --bgp-routing-mode=regional +if [ $? -ne 0 ]; then + echo "create vpc failed" + exit 1 +fi + +echo "create subnet" +gcloud compute networks subnets create "${TP_CLUSTER_NAME}" \ + --network "${TP_CLUSTER_NAME}" \ + --region "${GCP_REGION}" \ + --range "${TP_CLUSTER_VPC_CIDR}" +if [ $? -ne 0 ]; then + echo "create subnet failed" + exit 1 +fi + +echo "create firewall rule" +gcloud compute firewall-rules create "${TP_CLUSTER_NAME}" \ + --project="${GCP_PROJECT_ID}" \ + --network=projects/"${GCP_PROJECT_ID}"/global/networks/"${TP_CLUSTER_NAME}" \ + --description=Allows\ connection\ from\ any\ source\ to\ any\ instance\ on\ the\ network\ using\ custom\ protocols. \ + --direction=INGRESS \ + --priority=65534 \ + --source-ranges="${AUTHORIZED_IP}" \ + --action=ALLOW \ + --rules=all +if [ $? 
-ne 0 ]; then + echo "create firewall rule failed" + exit 1 +fi + +echo "create GKE" +gcloud beta container \ + --project "${GCP_PROJECT_ID}" \ + clusters create "${TP_CLUSTER_NAME}" \ + --region "${GCP_REGION}" \ + --no-enable-basic-auth \ + --cluster-version "${TP_CLUSTER_VERSION}" \ + --release-channel "regular" \ + --machine-type "${TP_CLUSTER_INSTANCE_TYPE}" \ + --image-type "COS_CONTAINERD" \ + --disk-type "pd-balanced" \ + --disk-size "50" \ + --metadata disable-legacy-endpoints=true \ + --scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \ + --num-nodes "${TP_CLUSTER_DESIRED_CAPACITY}" \ + --monitoring=SYSTEM \ + --enable-ip-alias \ + --network "${TP_CLUSTER_NAME}" \ + --subnetwork "${TP_CLUSTER_NAME}" \ + --cluster-ipv4-cidr "${TP_CLUSTER_CIDR}" \ + --services-ipv4-cidr "${TP_CLUSTER_SERVICE_CIDR}" \ + --no-enable-intra-node-visibility \ + --default-max-pods-per-node "110" \ + --enable-autoscaling \ + --total-min-nodes "0" \ + --total-max-nodes "10" \ + --location-policy "BALANCED" \ + --security-posture=standard \ + --workload-vulnerability-scanning=disabled \ + --enable-master-authorized-networks \ + --master-authorized-networks "${AUTHORIZED_IP}" \ + --addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver,GcpFilestoreCsiDriver \ + --enable-autoupgrade \ + --enable-autorepair \ + --max-surge-upgrade 1 \ + --max-unavailable-upgrade 0 \ + --binauthz-evaluation-mode=DISABLED \ + --no-enable-managed-prometheus \ + --workload-pool "${GCP_PROJECT_ID}.svc.id.goog" \ + --enable-shielded-nodes \ + --node-locations "${GCP_REGION}-a" diff --git a/docs/recipes/k8s/cloud/scripts/gke/delete-gke.sh b/docs/recipes/k8s/cloud/scripts/gke/delete-gke.sh new file mode 100755 index 0000000..16e1a26 --- /dev/null +++ b/docs/recipes/k8s/cloud/scripts/gke/delete-gke.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# +# © 2024 Cloud Software Group, Inc. +# All Rights Reserved. Confidential & Proprietary. +# + +if [ -z "${GCP_PROJECT_ID}" ]; then + echo "Please set GCP_PROJECT_ID environment variable" + exit 1 +fi + +if [ -z "${TP_CLUSTER_NAME}" ]; then + echo "Please set TP_CLUSTER_NAME environment variable" + exit 1 +fi + +echo "delete GKE cluster ${TP_CLUSTER_NAME}" +gcloud beta container \ + --project "${GCP_PROJECT_ID}" \ + clusters delete --quiet "${TP_CLUSTER_NAME}" \ + --region "${GCP_REGION}" + +# check if the cluster has been deleted +while true; do + # Attempt to get cluster information + CLUSTER_STATUS=$(gcloud beta container clusters list --project "${GCP_PROJECT_ID}" --region "${GCP_REGION}" --filter="name=${TP_CLUSTER_NAME}" --format="value(status)") + + # If no status is returned, the cluster has been deleted + if [ -z "$CLUSTER_STATUS" ]; then + echo "Cluster ${TP_CLUSTER_NAME} has been successfully deleted." + break + else + echo "Cluster ${TP_CLUSTER_NAME} is still deleting... 
Current status: ${CLUSTER_STATUS}" + fi + + # Wait 30 seconds before checking again + sleep 30 +done + +# Delete Filestore instances inside the VPC +FILSTORE_INSTANCES=$(gcloud beta filestore instances list --project="${GCP_PROJECT_ID}" --location="${GCP_REGION}"-a --filter="networks.network=${TP_CLUSTER_NAME}" --format="value(name)") + +# Check if there are any Filestore instances to delete +if [ -z "$FILSTORE_INSTANCES" ]; then + echo "No Filestore instances found for cluster ${TP_CLUSTER_NAME}." +else + # Loop through each instance and delete it + for INSTANCE in $FILSTORE_INSTANCES; do + echo "Deleting Filestore instance: ${INSTANCE}" + gcloud beta filestore instances delete "${INSTANCE}" --project="${GCP_PROJECT_ID}" --location="${GCP_REGION}"-a --quiet + echo "Filestore instance ${INSTANCE} deleted." + done +fi + +echo "delete vpc firewall rule" +gcloud compute firewall-rules delete "${TP_CLUSTER_NAME}" --project="${GCP_PROJECT_ID}" --quiet + +# List all subnets associated with the VPC network +SUBNETS=$(gcloud compute networks subnets list --project="${GCP_PROJECT_ID}" --filter="network:${TP_CLUSTER_NAME}" --format="value(name,region)") + +# Check if there are any subnets to delete +if [ -z "$SUBNETS" ]; then + echo "No subnets found for VPC network ${TP_CLUSTER_NAME}." +else + # Loop through each subnet and delete it + echo "Deleting subnets associated with VPC network ${TP_CLUSTER_NAME}..." + while IFS= read -r subnet_info; do + SUBNET_NAME=$(echo "$subnet_info" | awk '{print $1}') + SUBNET_REGION=$(echo "$subnet_info" | awk '{print $2}') + echo "Deleting subnet: ${SUBNET_NAME} in region: ${SUBNET_REGION}" + gcloud compute networks subnets delete "${SUBNET_NAME}" --project="${GCP_PROJECT_ID}" --region="${SUBNET_REGION}" --quiet + echo "Subnet ${SUBNET_NAME} deleted." + done <<< "$SUBNETS" +fi + +echo "delete vpc" +gcloud compute networks delete "${TP_CLUSTER_NAME}" --project="${GCP_PROJECT_ID}" --quiet diff --git a/docs/recipes/k8s/cloud/scripts/gke/prepare-gke-sa.sh b/docs/recipes/k8s/cloud/scripts/gke/prepare-gke-sa.sh new file mode 100755 index 0000000..33edb8b --- /dev/null +++ b/docs/recipes/k8s/cloud/scripts/gke/prepare-gke-sa.sh @@ -0,0 +1,109 @@ +#!/bin/bash +# +# © 2024 Cloud Software Group, Inc. +# All Rights Reserved. Confidential & Proprietary. +# + +####################################### +# prepare-gke-sa - creates prerequisites for the GKE cluster +# Globals: +# GCP_PROJECT_ID: gcp project id +# Arguments: +# None +# Returns: +# 0 on success, non-zero on error +# Notes: +# None +# Samples: +# None +####################################### + +if [ -z "${GCP_PROJECT_ID}" ]; then + echo "Please set GCP_PROJECT_ID environment variable" + exit 1 +fi + +############################# Part 1 enable APIs ############################# +# Enable Kubernetes Engine API +echo "Enabling Kubernetes Engine API..." +gcloud services enable container.googleapis.com --project="${GCP_PROJECT_ID}" +echo "Kubernetes Engine API enabled." + +# Enable Cloud Filestore API +echo "Enabling Cloud Filestore API..." +gcloud services enable file.googleapis.com --project="${GCP_PROJECT_ID}" +echo "Cloud Filestore API enabled."
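These enable calls are safe to repeat, but a guard like the following (a sketch, not part of the script) makes the intent explicit and keeps the log quiet on reruns:

```bash
# skip the enable call when the API is already on (illustrative)
if gcloud services list --enabled --project="${GCP_PROJECT_ID}" \
    --filter="config.name=file.googleapis.com" --format="value(config.name)" | grep -q .; then
  echo "Cloud Filestore API already enabled."
else
  gcloud services enable file.googleapis.com --project="${GCP_PROJECT_ID}"
fi
```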
+ +############################# Part 2 setup IAM ############################# + +# Check if the service account exists +function check_service_account() { + _gcp_sa_name=$1 + gcloud iam service-accounts list --filter="email=${_gcp_sa_name}@${GCP_PROJECT_ID}.iam.gserviceaccount.com" --format="value(email)" +} + +# create-gcp-k8s-sa creates a service account, binds the given role, and adds the workload identity binding +function create-gcp-k8s-sa() { + _gcp_project_id=$1 + _gcp_sa_name=$2 + _gcp_sa_namespace_name=$3 + _gcp_sa_role=$4 + gcloud iam service-accounts create "${_gcp_sa_name}" \ + --display-name "${_gcp_sa_name}" + if [ $? -ne 0 ]; then + echo "Failed to create service account ${_gcp_sa_name}" + return 1 + fi + + gcloud projects add-iam-policy-binding "${_gcp_project_id}" \ + --member "serviceAccount:${_gcp_sa_name}@${_gcp_project_id}.iam.gserviceaccount.com" \ + --role "${_gcp_sa_role}" + if [ $? -ne 0 ]; then + echo "Failed to add role to service account ${_gcp_sa_name}" + return 1 + fi + + gcloud iam service-accounts add-iam-policy-binding \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${_gcp_project_id}.svc.id.goog[${_gcp_sa_namespace_name}]" \ + "${_gcp_sa_name}@${_gcp_project_id}.iam.gserviceaccount.com" + if [ $? -ne 0 ]; then + echo "Failed to add workload identity binding to service account ${_gcp_sa_name}" + return 1 + fi +} + +echo "Prepare GKE Service Accounts for project ${GCP_PROJECT_ID}" + +## Create Service Account for Cert Manager +export GCP_SA_CERT_MANAGER_NAME="tp-cert-manager-sa" + +# Check if the service account exists +EXISTING_SA=$(check_service_account "${GCP_SA_CERT_MANAGER_NAME}") +if [ -z "$EXISTING_SA" ]; then + ## Service Account for Cert Manager + create-gcp-k8s-sa "${GCP_PROJECT_ID}" "${GCP_SA_CERT_MANAGER_NAME}" "cert-manager/cert-manager" "roles/dns.admin" + if [ $? -ne 0 ]; then + echo "Failed to create service account ${GCP_SA_CERT_MANAGER_NAME}" + exit 1 + fi +else + echo "Service account ${GCP_SA_CERT_MANAGER_NAME} already exists." +fi + + +## Create Service Account for External DNS +export GCP_SA_EXTERNAL_DNS_NAME="tp-external-dns-sa" + +EXISTING_SA=$(check_service_account "${GCP_SA_EXTERNAL_DNS_NAME}") +if [ -z "$EXISTING_SA" ]; then + ## Service Account for External DNS + create-gcp-k8s-sa "${GCP_PROJECT_ID}" "${GCP_SA_EXTERNAL_DNS_NAME}" "external-dns-system/external-dns" "roles/dns.admin" + if [ $? -ne 0 ]; then + echo "Failed to create service account ${GCP_SA_EXTERNAL_DNS_NAME}" + exit 1 + fi +else + echo "Service account ${GCP_SA_EXTERNAL_DNS_NAME} already exists." +fi diff --git a/docs/recipes/tests/test-gcp.yaml b/docs/recipes/tests/test-gcp.yaml new file mode 100644 index 0000000..2ed3e6e --- /dev/null +++ b/docs/recipes/tests/test-gcp.yaml @@ -0,0 +1,28 @@ +# +# Copyright © 2024. Cloud Software Group, Inc. +# This file is subject to the license terms contained +# in the license file that is distributed with this file. +# + +# sample recipe +# We can use this recipe to test the GCP connection +apiVersion: v1 +kind: generic-runner +meta: + globalEnvVariable: + REPLACE_RECIPE: true + PIPELINE_LOG_DEBUG: false + GCP_PROJECT_ID: "" + # GCP_REGION: us-west1 + PIPELINE_INITIAL_ASSUME_ROLE: false + RUN1: true +tasks: + - condition: ${RUN1} + script: + ignoreErrors: false + base64Encoded: false + skip: false + fileName: script.sh + content: | + gcloud auth list + gcloud compute instances list
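With the pieces above in place, a quick end-to-end check of the GCP wiring is to run this test recipe through the local runner (the account name is illustrative; it only needs to match the `gcp-` pattern that common::assume_role greps for):

```bash
export PIPELINE_INPUT_RECIPE="docs/recipes/tests/test-gcp.yaml"
export ACCOUNT="gcp-my-project"   # hypothetical; must start with "gcp-"
./dev/platform-provisioner.sh
```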