diff --git a/.gitignore b/.gitignore index d31c96326..73c69ce1b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,9 +2,15 @@ **/charts/*.tgz **/requirements.lock + # Generated example yaml /build *.tar.gz # OSX trash .DS_Store + +#terratest +**/terratest/pkg/* +**/terratest/bin/* +**/terratest/src/test/vendor \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 00af88b7f..2e7a082d4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,8 @@ ---- language: python env: global: - HELM_URL=https://storage.googleapis.com/kubernetes-helm - - HELM_TGZ=helm-v2.11.0-linux-amd64.tar.gz + - HELM_TGZ=helm-v2.14.0-linux-amd64.tar.gz - YAMLLINT_VERSION=1.15.0 install: # Install Helm @@ -11,43 +10,76 @@ install: - tar xzfv ${HELM_TGZ} - PATH=`pwd`/linux-amd64/:$PATH - helm init --client-only - # Install pip deps + # Install YamlLint - sudo pip install yamllint=="${YAMLLINT_VERSION}" + # Install Go + - wget -c https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz + - sudo tar -C /usr/local -xvzf go1.7.3.linux-amd64.tar.gz + - export PATH=$PATH:/usr/local/go/bin script: - # Check YAML styling - yamllint -c .yamllint.yml -s $(find . -type f -name "Chart.yaml") - - yamllint -c .yamllint.yml -s $(find . -type f -name "values.yaml") + - yamllint -c .yamllint.yml -s $(find . 
-type f -name "values*.yaml") # Now load the helm dependencies - make dependencies # Run Helm lint - - helm lint ./charts/pega + - helm lint ./charts/pega + - helm lint ./charts/addons + # Run GO helm unit tests + - mkdir $TRAVIS_BUILD_DIR/terratest/bin + - export GOPATH=$TRAVIS_BUILD_DIR/terratest + - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + - export PATH=$PATH:$TRAVIS_BUILD_DIR/terratest/bin + - cd terratest/src/test + - dep ensure + - go test deploy_test.go common_utility.go deployment_utility.go + - go test install_test.go common_utility.go installer_utility.go + - go test install_deploy_test.go common_utility.go deployment_utility.go installer_utility.go + - go test upgrade_test.go common_utility.go installer_utility.go + - go test upgrade_deploy_test.go common_utility.go deployment_utility.go installer_utility.go + - go test aks_deploy_test.go common_utility.go deployment_utility.go installer_utility.go + - go test eks_deploy_test.go common_utility.go deployment_utility.go + - go test openshift_test.go common_utility.go deployment_utility.go + - go test invalidAction_test.go + - cd $TRAVIS_BUILD_DIR + - chmod 777 before_deploy.sh +before_deploy: + - ./before_deploy.sh - make examples deploy: - provider: releases - api_key: - secure: > - GuZqxcgBOueUOJWg6t5NaCTIssfkptgdAjwyWm/6SaVXqs9Kdgy - EawtYSu0WLh815qGmuxgVjMbvvkAvtbBXanKH+BheRl1cQjYnza - XJlL+Oa/xxzCqz1n4txqtDMR7l/loqrMTzCRe2bZ806z3+uXpXh - CO12+LwvMDJAAddDkX+2wRE240/1f/0lVlZ2lNzgBitC63jAf1H - ml3/KrgHsTYvNB5M6qKn8C24DcC6PMe012K3hL0O+F+er9Mb39R - FcSLTjF/6AiLvpK4oDAg8sBgciXVq0FPhCXvjbINGB/AY7jM5Ql - 0A6ngngZjsjefn+NbtZ3aK230h2m0XM+dN7FJCbtRQeqvczcwRT - /wpDimpzFR0rf/NdXV4G+UZYI6a3bjqKYP5IPN8BbzVo8KzlbGW - JeJLV40gW7EbdyT57R2dNp46I17c1KL8atQH97n3yv9d9ZaC87O - 6tOohLmue7L3lKxPDpY60VFna5BjJwvrhW4gvsd11Ko4c39FSu8 - ogBSZ6E1EOlmVZDX+cBjuw4dGihcrHqWW5V3k2X/ksAKb+4A74g - AG8pCr/ifPEj9kTWb71IIF5RHkD6JqeGJdKaWFHDpWlYhKNTLRt - 3gmcgdLmM5MN9A5khwBzPYiF5oaWT1COxh/Rjdz5V0+jUvyRvc9 - 
a/rvc6hM6quXSmlAwA0U= - file: - - pega-kubernetes-example.tar.gz - - pega-openshift-example.tar.gz - - pega-azure-aks-example.tar.gz - - pega-aws-eks-example.tar.gz - - pega-google-gke-example.tar.gz - - pega-pivotal-pks-example.tar.gz - skip_cleanup: true - on: - repo: pegasystems/pega-helm-charts - tags: true + - provider: releases + api_key: + secure: > + GuZqxcgBOueUOJWg6t5NaCTIssfkptgdAjwyWm/6SaVXqs9Kdgy + EawtYSu0WLh815qGmuxgVjMbvvkAvtbBXanKH+BheRl1cQjYnza + XJlL+Oa/xxzCqz1n4txqtDMR7l/loqrMTzCRe2bZ806z3+uXpXh + CO12+LwvMDJAAddDkX+2wRE240/1f/0lVlZ2lNzgBitC63jAf1H + ml3/KrgHsTYvNB5M6qKn8C24DcC6PMe012K3hL0O+F+er9Mb39R + FcSLTjF/6AiLvpK4oDAg8sBgciXVq0FPhCXvjbINGB/AY7jM5Ql + 0A6ngngZjsjefn+NbtZ3aK230h2m0XM+dN7FJCbtRQeqvczcwRT + /wpDimpzFR0rf/NdXV4G+UZYI6a3bjqKYP5IPN8BbzVo8KzlbGW + JeJLV40gW7EbdyT57R2dNp46I17c1KL8atQH97n3yv9d9ZaC87O + 6tOohLmue7L3lKxPDpY60VFna5BjJwvrhW4gvsd11Ko4c39FSu8 + ogBSZ6E1EOlmVZDX+cBjuw4dGihcrHqWW5V3k2X/ksAKb+4A74g + AG8pCr/ifPEj9kTWb71IIF5RHkD6JqeGJdKaWFHDpWlYhKNTLRt + 3gmcgdLmM5MN9A5khwBzPYiF5oaWT1COxh/Rjdz5V0+jUvyRvc9 + a/rvc6hM6quXSmlAwA0U= + file: + - pega-kubernetes-example.tar.gz + - pega-openshift-example.tar.gz + - pega-azure-aks-example.tar.gz + - pega-aws-eks-example.tar.gz + - pega-google-gke-example.tar.gz + - pega-pivotal-pks-example.tar.gz + skip_cleanup: true + on: + repo: pegasystems/pega-helm-charts + tags: true + - provider: bintray + file: descriptor.json + user: bintrayautomation + key: + secure: 
M1CPmmjJ1OI2luWFRW9Aqregj3b4rQLSnP4ROurkHkewB+HwoZ+QHAQ4JJD0B+br2AddaGKgN1rdzYpyvg0Cl7ZhWnMb96bE+lVbmDzYnUc+Iwd8/PWXNoaFuB5sn5LPVESe8H/ZLdP0IyMbBknBDgdNgKpD9XqH7/UrrNT9jvZSpGFAcMCwLVFiQ1auusnSLOYVr611JadpNZarx7QgPLBtlit+cfi/vEGy7GyYTtFzSbM86gBnm1kViSVM4w1bE4cFs+14KVNiNiwpSzFzK/gBncqQVTz6yaD3bV48Cla5sz3/sDkh3If7pUhcmMYyy9BPk35176TWTHKcqATv/5GxDvQpgaBMlyl0Dhwhb2d++ZZ6V+kYYj+Uf4m4NaxGV6WNrUnW+IPw9qlBWFHnw17u80HU5GapXXNedUi2CjzrH25A0Y6DHqfZubDBXJh31RfWKz0r58bLSGLYxdl96purfLUCeSrsKkZAWS59EmpXiR/f+zSbNmlpG+ryVBsQIPG+cwdPSVUdGBGVp3QzEjVze6gABSQ/2GaMeRterjzBypi8ab4vbIyxY905deoMAwcQHUcvybpNJfRcaowToK0BnMNhy8pn26KYct3oGLVxQe6rlGbQNZrFePSCyNd/ZfYlkWDcLh/j0OH0gdspHTdDfsy8dgS8QzvgNsNKRHk= + skip_cleanup: true + on: + all_branches: true + tags: true \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..0570fb4d5 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# How to contribute + +## Getting Started + +* Make sure you have a [GitHub account](https://github.com/signup/free) +* Submit a ticket for your issue, assuming one does not already exist. + * Clearly describe the issue including steps to reproduce when it is a bug. + * Make sure you fill in the earliest version that you know has the issue. +* Fork the repository on GitHub + +## Making Changes + +* Create a topic branch from where you want to base your work. + * This is usually the master branch. + * Only target release branches if you are certain your fix must be on that + branch. + * To quickly create a topic branch based on master; `git checkout -b + fix/master/my_contribution master`. Please avoid working directly on the + `master` branch. +* Make commits of logical units. +* Check for unnecessary whitespace with `git diff --check` before committing. +* Make sure your commit messages are in the proper format: + +`ISSUE-1234: terse and to the point message describing change` + +* Make sure you have added the necessary tests for your changes. 
+* Run _all_ the tests to assure nothing else was accidentally broken. +* Make sure you've done a squash and rebase before submitting + +## Submitting Changes + +* Push your changes to a topic branch in your fork of the repository. +* Submit a pull request. +* After feedback has been given we expect responses within two weeks. After two + weeks we may close the pull request if it isn't showing any activity. + +# Additional Resources + +* [General GitHub documentation](https://help.github.com/) +* [GitHub pull request documentation](https://help.github.com/send-pull-requests/) diff --git a/Makefile b/Makefile index 315b3bd81..fbc780e59 100644 --- a/Makefile +++ b/Makefile @@ -1,64 +1,66 @@ dependencies: helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ - helm repo list + helm repo add stable https://kubernetes-charts.storage.googleapis.com + helm repo list helm dependency update ./charts/pega/ + helm dependency update ./charts/addons/ -examples: dependencies +examples: mkdir -p ./build/kubernetes helm template ./charts/pega/ \ --output-dir ./build/kubernetes \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=k8s \ - --set actions.execute=deploy - tar -C ./build/kubernetes/pega/templates -cvzf ./pega-kubernetes-example.tar.gz . + --set global.provider=k8s \ + --set global.actions.execute=deploy + tar -C ./build/kubernetes/pega -cvzf ./pega-kubernetes-example.tar.gz . mkdir -p ./build/openshift helm template ./charts/pega/ \ --output-dir ./build/openshift \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=openshift \ - --set actions.execute=deploy - tar -C ./build/openshift/pega/templates -cvzf ./pega-openshift-example.tar.gz . + --set global.provider=openshift \ + --set global.actions.execute=deploy + tar -C ./build/openshift/pega -cvzf ./pega-openshift-example.tar.gz . 
mkdir -p ./build/aws-eks helm template ./charts/pega/ \ --output-dir ./build/aws-eks \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=eks \ - --set actions.execute=deploy - tar -C ./build/aws-eks/pega/templates -cvzf ./pega-aws-eks-example.tar.gz . + --set global.provider=eks \ + --set global.actions.execute=deploy + tar -C ./build/aws-eks/pega -cvzf ./pega-aws-eks-example.tar.gz . mkdir -p ./build/azure-aks helm template ./charts/pega/ \ --output-dir ./build/azure-aks \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=aks \ - --set actions.execute=deploy - tar -C ./build/azure-aks/pega/templates -cvzf ./pega-azure-aks-example.tar.gz . + --set global.provider=aks \ + --set global.actions.execute=deploy + tar -C ./build/azure-aks/pega -cvzf ./pega-azure-aks-example.tar.gz . mkdir -p ./build/google-gke helm template ./charts/pega/ \ --output-dir ./build/google-gke \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=gke \ - --set actions.execute=deploy - tar -C ./build/google-gke/pega/templates -cvzf ./pega-google-gke-example.tar.gz . + --set global.provider=gke \ + --set global.actions.execute=deploy + tar -C ./build/google-gke/pega -cvzf ./pega-google-gke-example.tar.gz . mkdir -p ./build/pivotal-pks helm template ./charts/pega/ \ --output-dir ./build/pivotal-pks \ --values ./charts/pega/values.yaml \ --namespace example \ - --set provider=pks \ - --set actions.execute=deploy - tar -C ./build/pivotal-pks/pega/templates -cvzf ./pega-pivotal-pks-example.tar.gz . + --set global.provider=pks \ + --set global.actions.execute=deploy + tar -C ./build/pivotal-pks/pega -cvzf ./pega-pivotal-pks-example.tar.gz . 
clean: rm -rf ./build rm -rf ./charts/pega/charts/* - rm -rf ./*.tar.gz + rm -rf ./*.tar.gz \ No newline at end of file diff --git a/README.md b/README.md index 55c2d31ea..328da6207 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,22 @@ -# Pega Deployment on Kubernetes +# Pega deployment on Kubernetes -This project provides Helm charts and basic examples for deploying Pega on Kubernetes. This project **does not include** the required database installation image which you [may obtain from the Pega Community](https://community.pega.com/knowledgebase/products/platform/deploy). Deploying Pega on Kubernetes requires Pega Infinity 8.2 or newer. +This project provides Helm charts and basic examples for deploying Pega on Kubernetes. You will also need to download the required [installation kit](https://community.pega.com/knowledgebase/products/platform/deploy) from the Pega Community which includes rules and data to preload into your relational database. Deploying Pega on Kubernetes requires Pega Infinity 8.2 or newer. [![Build Status](https://travis-ci.org/pegasystems/pega-helm-charts.svg?branch=master)](https://travis-ci.org/pegasystems/pega-helm-charts) [![GitHub release](https://img.shields.io/github/release/pegasystems/pega-helm-charts.svg)](https://github.com/pegasystems/pega-helm-charts/releases) -## Getting Started +# Supported Kubernetes environments + +Pegasystems has validated deployments on the following Kubernetes IaaS and PaaS environments. + +* Open-source Kubernetes (and [MiniKube for personal deployments](docs/RUNBOOK_MINIKUBE.md)) +* Microsoft Azure Kubernetes Service (AKS) +* Amazon Elastic Kubernetes Service (EKS) +* Google Kubernetes Engine (GKE) +* Red Hat OpenShift +* Pivotal Container Service (PKS) + +# Getting started This project assumes you have an installation of Kubernetes available and have Helm installed locally. The following commands will verify your installation. 
The exact output may be slightly different, but they should return without error. ```console @@ -22,12 +33,52 @@ Server: &version.Version{SemVer:"v2.12.2", GitCommit:"7d2b0c73d734f6586ed222a567 Start by performing a clone (or download) of the latest Charts. -`` git clone https://github.com/pegasystems/pega-helm-charts.git `` +```bash +git clone https://github.com/pegasystems/pega-helm-charts.git +``` -Navigate to the project directory and open the values.yaml file. This is the configuration file that tells Helm what and how to deploy. For additional documentation covering the different deployment options, see the Pega Community article on [Deploying the Pega Platform by using Kubnernetes](https://community.pega.com/knowledgebase/articles/deploying-pega-platform-using-kubernetes). +## Update dependencies -## Dependencies +The Pega charts depends on other charts supplied by third parties. These are called out in the requirements yaml file for the [pega](charts/pega/requirements.yaml) and [addons](charts/addons/requirements.yaml) charts. Individual dependencies may or may not be deployed based on the configuration of your values.yaml files. When you first setup your helm chart, you will need to update your dependencies to pull down these additional charts from their repositories. For convenience, the required commands are part of the [Makefile](Makefile) and can run with the following command. -The Pega chart depends on other charts supplied by third parties. These are called out in the [requirements yaml file](charts/pega/requirements.yaml). Individual dependencies may or may not be deployed based on the configuration of your values.yaml file. When you first setup your helm chart, you will need to update your dependencies to pull down these additional charts from their repositories. For convenience, the required commands are part of the [Makefile](Makefile) and can run with the ```make dependencies``` command. 
+```bash +make dependencies +``` For more information about Helm dependencies, see the [Helm documentation](https://helm.sh/docs/helm/#helm-dependency). + +## Configure and install using the charts + +There are two charts available in this repository - *addons* and *pega*. + +The addons chart installs a collection of supporting services and tools required for a Pega deployment. The services you will need to deploy will depend on your cloud environment - for example you may need a load balancer on Minikube, but not for EKS. These supporting services are deployed once per Kubernetes environment, regardless of how many Pega Infinity instances are deployed. + +[Instructions to configure the Pega addons](charts/addons/README.md) + +To install the addons chart, run the following helm command after configuring your values.yaml file. + +```bash +helm install . -n pegaaddons --namespace pegaaddons --values /home/user/my-overridden-values.yaml +``` + +After installing the addons, you can deploy Pega. Before installing using the chart, it is a good idea to review the detailed [deployment guide](https://community.pega.com/knowledgebase/articles/deploying-pega-platform-using-kubernetes) to understand how Pega deploys as a distributed system. Running a Helm installation using the pega chart installs a Pega Infinity instance into a specified namespace. + +[Instructions to configure the Pega chart](charts/pega/README.md) + +To install the pega chart, run the following helm command after configuring your values.yaml file. + +```bash +helm install . -n mypega --namespace myproject --values /home/user/my-overridden-values.yaml +``` + +To delete this chart, enter: + +```bash +helm delete mypega --purge +``` + +Navigate to the project directory and open the values.yaml file. This is the configuration file that tells Helm what and how to deploy. 
For additional documentation covering the different deployment options, see the Pega Community article on [Deploying the Pega Platform by using Kubernetes](https://community.pega.com/knowledgebase/articles/deploying-pega-platform-using-kubernetes). + +# Contributing + +This is an open source project and contributions are welcome. Please see the [contributing guidelines](./CONTRIBUTING.md) to get started. diff --git a/before_deploy.sh b/before_deploy.sh new file mode 100644 index 000000000..f98ae8205 --- /dev/null +++ b/before_deploy.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# CHART_VERSION is computed from the TAG details of the commit. Every Github release creates tag with the release name. +# Release name (or) Tag name should be in vX.X.X format. Helm CHART_VERSION would be X.X.X +export CHART_VERSION=$(expr ${TRAVIS_TAG:1}) +export PEGA_FILE_NAME=pega-${CHART_VERSION}.tgz +export ADDONS_FILE_NAME=addons-${CHART_VERSION}.tgz +cat descriptor-template.json | jq '.files[0].includePattern=env.PEGA_FILE_NAME' | jq '.files[0].uploadPattern=env.PEGA_FILE_NAME' | jq '.files[1].includePattern=env.ADDONS_FILE_NAME' | jq '.files[1].uploadPattern=env.ADDONS_FILE_NAME' > descriptor.json +curl -o index.yaml https://dl.bintray.com/pegasystems/pega-helm-charts/index.yaml +helm package --version ${CHART_VERSION} ./charts/pega/ +helm package --version ${CHART_VERSION} ./charts/addons/ +helm repo index --merge index.yaml --url https://dl.bintray.com/pegasystems/pega-helm-charts/ . \ No newline at end of file diff --git a/charts/addons/.helmignore b/charts/addons/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/addons/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/addons/Chart.yaml b/charts/addons/Chart.yaml new file mode 100644 index 000000000..f72a0b21a --- /dev/null +++ b/charts/addons/Chart.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: addons +version: 1.2.0 diff --git a/charts/addons/README.md b/charts/addons/README.md new file mode 100644 index 000000000..7ef496975 --- /dev/null +++ b/charts/addons/README.md @@ -0,0 +1,137 @@ +# Addons Helm chart + +The addons chart installs a collection of supporting services and tools required for a Pega deployment. The services you will need to deploy will depend on your cloud environment - for example you may need a load balancer on Minikube, but not for EKS. These supporting services are deployed once per Kubernetes environment, regardless of how many Pega Infinity instances are deployed. This readme provides a detailed description of possible configurations and their default values as applicable. + +## Load balancer + +Environment | Suggested load balancer +--- | --- +Open-source Kubernetes | Traefik +Red Hat Openshift | HAProxy (Using the `roundrobin` load balancer strategy) +Amazon Elastic Kubernetes Service (EKS) | Amazon Load Balancer (ALB) +Google Kubernetes Engine (GKE) | Traefik +Pivotal Container Service (PKS) | Traefik +Microsoft Azure Kubernetes Service (AKS) | Traefik + +### Traefik + +Deploying Pega Infinity with more than one Pod typically requires a load balancer to ensure that traffic is routed equally. Some IaaS and PaaS providers supply a load balancer and some do not. If a native load balancer is not provided and configured, or the load balancer does not support cookie based session affinity, Traefik may be used instead. 
If you do not wish to deploy Traefik, set `traefik.enabled` to `false` in the addons values.yaml configuration. For more configuration options available for Traefik, see the [Traefik Helm chart](https://github.com/helm/charts/blob/master/stable/traefik/values.yaml). + +Example: + +```yaml +traefik: + enabled: true + serviceType: NodePort + ssl: + enabled: false + rbac: + enabled: true + service: + nodePorts: + http: 30080 + https: 30443 + resources: + requests: + cpu: 200m + memory: 200Mi + limits: + cpu: 500m + memory: 500Mi +``` + +### Amazon ALB + +If deploying on Amazon Elastic Kubernetes Service (EKS), you can use the native Amazon Load Balancer (ALB). Set `traefik.enabled` to `false` and `aws-alb-ingress-controller.enabled` to `true`. + +Configuration | Usage +--- | --- +`clusterName` | The name of your EKS cluster. Resources created by the ALB Ingress controller will be prefixed with this string. +`autoDiscoverAwsRegion` | Auto discover awsRegion from ec2metadata, set this to true and omit awsRegion when ec2metadata is available. +`awsRegion` | AWS region of the EKS cluster. Required if ec2metadata is unavailable from the controller Pod or if `autoDiscoverAwsRegion` is not `true`. +`autoDiscoverAwsVpcID` | Auto discover awsVpcID from ec2metadata, set this to true and omit awsVpcID when ec2metadata is available. +`awsVpcID` | VPC ID of EKS cluster, required if ec2metadata is unavailable from controller pod. Required if ec2metadata is unavailable from the controller Pod or if `autoDiscoverAwsVpcID` is not `true`. +`extraEnv.AWS_ACCESS_KEY_ID` and `extraEnv.AWS_SECRET_ACCESS_KEY` | The access key and secret access key with access to configure AWS resources.
+ +Example: + +```yaml +aws-alb-ingress-controller: + enabled: false + clusterName: "YOUR_EKS_CLUSTER_NAME" + autoDiscoverAwsRegion: true + awsRegion: "YOUR_EKS_CLUSTER_REGION" + autoDiscoverAwsVpcID: true + awsVpcID: "YOUR_EKS_CLUSTER_VPC_ID" + extraEnv: + AWS_ACCESS_KEY_ID: "YOUR_AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY: "YOUR_AWS_SECRET_ACCESS_KEY" +``` + +## Aggregated logging + +Environment | Suggested logging tools +--- | --- +Open-source Kubernetes | EFK +Red Hat Openshift | Built-in EFK +Amazon Elastic Kubernetes Service (EKS) | Built-in EFK +Google Kubernetes Engine (GKE) | Stackdriver +Pivotal Container Service (PKS) | EFK +Microsoft Azure Kubernetes Service (AKS) | Azure Monitor + +## Logging with Elasticsearch-Fluentd-Kibana (EFK) + +EFK is a standard logging stack that is provided as an example for ease of getting started in environments that do not have aggregated logging configured such as open-source Kubernetes. Other IaaS and PaaS providers typically include a logging system out of the box. You may enable the three components of EFK ([Elasticsearch](https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml),[Fluentd](https://github.com/helm/charts/tree/master/stable/fluentd-elasticsearch/values.yaml), and [Kibana](https://github.com/helm/charts/tree/master/stable/kibana/values.yaml)) in the addons values.yaml file to deploy EFK automatically. For more configuration options available for each of the components, see their Helm Charts. + +Example: + +```yaml + +deploy_efk: &deploy_efk true + +elasticsearch: + enabled: *deploy_efk + fullnameOverride: "elastic-search" + +kibana: + enabled: *deploy_efk + files: + kibana.yml: + elasticsearch.url: http://elastic-search-client:9200 + service: + externalPort: 80 + ingress: + + enabled: true + # Enter the domain name to access kibana via a load balancer. 
+ hosts: + - "YOUR_WEB.KIBANA.EXAMPLE.COM" + +fluentd-elasticsearch: + enabled: *deploy_efk + elasticsearch: + host: elastic-search-client + buffer_chunk_limit: 250M + buffer_queue_limit: 30 + +``` + +## Metrics + +Environment | Suggested metrics server +--- | --- +Open-source Kubernetes | Metrics server +All others | Built-in metrics server + +Autoscaling in Kubernetes requires the use of a metrics server, a cluster-wide aggregator of resource usage data. Most PaaS and IaaS providers supply a metrics server, but if you wish to deploy into open source kubernetes, you will need to supply your own. + +See the [metrics-server Helm chart](https://github.com/helm/charts/blob/master/stable/metrics-server/values.yaml) for additional parameters. + +Example: + +```yaml +metrics-server: + enabled: true + args: + - --logtostderr +``` diff --git a/charts/addons/requirements.yaml b/charts/addons/requirements.yaml new file mode 100644 index 000000000..5b578dcbb --- /dev/null +++ b/charts/addons/requirements.yaml @@ -0,0 +1,25 @@ +dependencies: +- name: traefik + version: "1.77.1" + repository: https://kubernetes-charts.storage.googleapis.com + condition: traefik.enabled +- name: aws-alb-ingress-controller + version: "0.1.10" + repository: https://kubernetes-charts-incubator.storage.googleapis.com/ + condition: aws-alb-ingress-controller.enabled +- name: elasticsearch + version: "1.15.1" + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: elasticsearch.enabled +- name: fluentd-elasticsearch + version: "1.5.0" + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: fluentd-elasticsearch.enabled +- name: kibana + version: "1.1.0" + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: kibana.enabled +- name: metrics-server + version: "2.8.4" + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: metrics-server.enabled \ No newline at end of file diff --git a/charts/addons/values.yaml 
b/charts/addons/values.yaml new file mode 100644 index 000000000..c7e2a9d22 --- /dev/null +++ b/charts/addons/values.yaml @@ -0,0 +1,119 @@ +--- +# Configure Traefik for load balancing: +# If enabled: true, Traefik is deployed automatically. +# If enabled: false, Traefik is not deployed and load balancing must be configured manually. +# Pega recommends enabling Traefik on providers other than Openshift and eks. +# On Openshift, Traefik is ignored and Pega uses Openshift's built-in load balancer. +# On eks it is recommended to use aws alb ingress controller. +traefik: + enabled: true + # Set any additional Traefik parameters. These values will be used by Traefik's helm chart. + # See https://github.com/helm/charts/blob/master/stable/traefik/values.yaml + # Set traefik.serviceType to "LoadBalancer" on gke, aks, and pks + serviceType: NodePort + # If enabled is set to "true", ssl will be enabled for traefik + ssl: + enabled: false + rbac: + enabled: true + service: + nodePorts: + # NodePorts for traefik service. + http: 30080 + https: 30443 + resources: + requests: + # Enter the CPU Request for traefik + cpu: 200m + # Enter the memory request for traefik + memory: 200Mi + limits: + # Enter the CPU Limit for traefik + cpu: 500m + # Enter the memory limit for traefik + memory: 500Mi +# Set this to true to install aws-alb-ingress-controller. Follow below guidelines specific to each provider, +# For EKS - set this to true. +# GKE or AKS or K8s or Openshift - set this to false and enable traefik. +aws-alb-ingress-controller: + enabled: false + ## Resources created by the ALB Ingress controller will be prefixed with this string + clusterName: "YOUR_EKS_CLUSTER_NAME" + ## Auto Discover awsRegion from ec2metadata, set this to true and omit awsRegion when ec2metadata is available. 
+ autoDiscoverAwsRegion: true + ## AWS region of k8s cluster, required if ec2metadata is unavailable from controller pod + ## Required if autoDiscoverAwsRegion != true + awsRegion: "YOUR_EKS_CLUSTER_REGION" + ## Auto Discover awsVpcID from ec2metadata, set this to true and omit awsVpcID when ec2metadata is available. + autoDiscoverAwsVpcID: true + ## VPC ID of k8s cluster, required if ec2metadata is unavailable from controller pod + ## Required if autoDiscoverAwsVpcID != true + awsVpcID: "YOUR_EKS_CLUSTER_VPC_ID" + extraEnv: + AWS_ACCESS_KEY_ID: "YOUR_AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY: "YOUR_AWS_SECRET_ACCESS_KEY" +# Configure EFK stack for logging: +# For a complete EFK stack: elasticsearch, fluentd-elasticsearch, and kibana should all be enabled +# Pega recommends deploying EFK only on k8s +# On Openshift, see https://docs.openshift.com/container-platform/3.11/install_config/aggregate_logging.html +# On EKS, see https://eksworkshop.com/logging/ + +# Replace false with true to deploy EFK. +# Do not remove &deploy_efk; it is a yaml anchor which is referenced by the EFK subcharts. +deploy_efk: &deploy_efk false + +elasticsearch: + enabled: *deploy_efk + # Set any additional elastic search parameters. These values will be used by elasticsearch helm chart. + # See https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml + # + # If you need to change this value then you will also need to replace the same + # part of the value within the following properties further below: + # + # kibana.files.kibana.yml.elasticsearch.url + # fluentd-elasticsearch.elasticsearch.host + # + fullnameOverride: "elastic-search" + +kibana: + enabled: *deploy_efk + # Set any additional kibana parameters. These values will be used by Kibana's helm chart.
+ # See https://github.com/helm/charts/tree/master/stable/kibana/values.yaml + files: + kibana.yml: + elasticsearch.url: http://elastic-search-client:9200 + service: + externalPort: 80 + ingress: + # If enabled is set to "true", an ingress is created to access kibana. + enabled: true + # Enter the domain name to access kibana via a load balancer. + hosts: + - "YOUR_WEB.KIBANA.EXAMPLE.COM" + +fluentd-elasticsearch: + enabled: *deploy_efk + # Set any additional fluentd-elasticsearch parameters. These values will be used by fluentd-elasticsearch's helm chart. + # See https://github.com/helm/charts/tree/master/stable/fluentd-elasticsearch/values.yaml + elasticsearch: + host: elastic-search-client + buffer_chunk_limit: 250M + buffer_queue_limit: 30 + +metrics-server: + # Set this to true to install metrics-server. Follow below guidelines specific to each provider, + # open-source Kubernetes, Openshift & EKS - mandatory to set this to true if any tier has hpa.enabled is true + # GKE or AKS - set this to false since metrics-server is installed in the cluster by default. + enabled: true + # Set any additional metrics-server parameters. These values will be used by metrics-server's helm chart. + # See https://github.com/helm/charts/blob/master/stable/metrics-server/values.yaml + args: + - --logtostderr +# The order in which to consider different Kubelet node address types when connecting to Kubelet. +# Uncomment below argument if host names are not resolvable from metrics server pod. +# This setting is not required for public cloud providers & openshift enterprise. It may be required for open-source Kubernetes. +# - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP +# Uncomment below argument to skip verifying Kubelet CA certificates. +# Not recommended for production usage, but can be useful in test clusters with self-signed Kubelet serving certificates. +# This setting is not required for public cloud providers & openshift enterprise.
It may be required for open-source Kubernetes. +# - --kubelet-insecure-tls diff --git a/charts/pega/Chart.yaml b/charts/pega/Chart.yaml index 50134b1d7..219004b9f 100644 --- a/charts/pega/Chart.yaml +++ b/charts/pega/Chart.yaml @@ -1,6 +1,6 @@ --- name: pega -version: "1.0.0" +version: "1.2.0" description: Pega installation on kubernetes keywords: - pega diff --git a/charts/pega/README.md b/charts/pega/README.md new file mode 100644 index 000000000..111f5f248 --- /dev/null +++ b/charts/pega/README.md @@ -0,0 +1,357 @@ +# Pega Helm chart + +The Pega Helm chart is used to deploy an instance of Pega Infinity into a Kubernetes environment. This readme provides a detailed description of possible configurations and their default values as applicable. + +## Supported providers + +Enter your Kubernetes provider which will allow the Helm charts to configure to any differences between deployment environments. + +Value | Deployment target +--- | --- +k8s | Open-source Kubernetes +openshift | Red Hat Openshift +eks | Amazon Elastic Kubernetes Service (EKS) +gke | Google Kubernetes Engine (GKE) +pks | Pivotal Container Service (PKS) +aks | Microsoft Azure Kubernetes Service (AKS) + +Example for a kubernetes environment: + +```yaml +provider: "k8s" +``` + +## JDBC Configuration + +Use the `jdbc` section of the values file to specify how to connect to the Pega database. *Pega must be installed to this database before deploying on Kubernetes*. + +### URL and Driver Class +These required connection details will point Pega to the correct database and provide the type of driver used to connect. Examples of the correct format to use are provided below. 
+ +Example for Oracle: +```yaml +jdbc: + url: jdbc:oracle:thin:@//YOUR_DB_HOST:1521/YOUR_DB_NAME + driverClass: oracle.jdbc.OracleDriver +``` +Example for Microsoft SQL Server: +```yaml +jdbc: + url: jdbc:sqlserver://YOUR_DB_HOST:1433;databaseName=YOUR_DB_NAME;selectMethod=cursor;sendStringParametersAsUnicode=false + driverClass: com.microsoft.sqlserver.jdbc.SQLServerDriver +``` + +Example for IBM DB2 for LUW: +```yaml +jdbc: + url: jdbc:db2://YOUR_DB_HOST:50000/YOUR_DB_NAME:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true;progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + driverClass: com.ibm.db2.jcc.DB2Driver +``` + +Example for IBM DB2 for z/OS: +```yaml +jdbc: + url: jdbc:db2://YOUR_DB_HOST:50000/YOUR_DB_NAME + driverClass: com.ibm.db2.jcc.DB2Driver +``` + +Example for PostgreSQL: +```yaml +jdbc: + url: jdbc:postgresql://YOUR_DB_HOST:5432/YOUR_DB_NAME + driverClass: org.postgresql.Driver +``` + +### Driver URI + +Pega requires a database driver JAR to be provided for connecting to the relational database. This JAR may either be baked into your image by extending the Pega provided Docker image, or it may be pulled in dynamically when the container is deployed. If you want to pull in the driver during deployment, you will need to specify a URL to the driver using the `jdbc.driverUri` parameter. This address must be visible and accessable from the process running inside the container. + +### Authentication + +The simplest way to provide database authorization is via the `jdbc.username` and `jdbc.password` parameters. These values will create a Kubernetes Secret and at runtime will be obfuscated and stored in a secrets file. + +### Connection Properties + +You may optionally set your connection properties that will be sent to our JDBC driver when establishing new connections. The format of the string is `[propertyName=property;]`. + +### Schemas + +It is standard practice to have seperate schemas for your rules and data. 
You may specify them as `rulesSchema` and `dataSchema`. If desired, you may also optionally set the `customerDataSchema` for your database. The `customerDataSchema` defaults to value of `dataSchema` if not specified. Additional schemas can be defined within Pega. + + Example: + + ```yaml +jdbc: + ... + rulesSchema: "rules" + dataSchema: "data" + customerDataSchema: "" +``` + +## Docker + +Specify the location for the Pega Docker image. This image is available on DockerHub, but can also be mirrored and/or extended with the use of a private registry. Specify the `url` of the image or use the default of pegasystems/pega. + +When using a private registry that requires a username and password, specify them using the `docker.registry.username` and `docker.registry.password` parameters. + +Note: the `imagePullPolicy` is always for all images in this deployment by default. + +Example: + + ```yaml +docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" +``` + +## Tiers of a Pega deployment + +Pega supports deployment using a multi-tier architecture to separate processing and functions. Isolating processing in its own tier also allows for unique deployment configuration such as its own prconfig, resource allocations, or scaling characteristics. Use the `tier` section in the helm chart to specify which tiers you wish to deploy and their logical tasks. + +### Tier examples + +Three values.yaml files are provided to showcase real world deployment examples. These examples can be used as a starting point for customization and are not expected to deployed as-is. + +For more information about the architecture for how Pega Platform runs in a Pega cluster, see [How Pega Platform and applications are deployed on Kubernetes](https://community.pega.com/knowledgebase/articles/cloud-choice/how-pega-platform-and-applications-are-deployed-kubernetes). 
+ +#### Standard deployment using three tiers + +To provision a three tier Pega cluster, use the default example in the helm chart, which is a good starting point for most deployments: + +Tier name | Description +--- |--- +web | Interactive, foreground processing nodes that are exposed to the load balancer. Pega recommends that these nodes use the node classification “WebUser” `nodetype`. +batch | Background processing nodes which handle workloads for non-interactive processing. Pega recommends that these nodes use the node classification “BackgroundProcessing” `nodetype`. These nodes should not be exposed to the load balancer. +stream | Nodes that run an embedded deployment of Kafka and are exposed to the load balancer. Pega recommends that these nodes use the node classification “Stream” `nodetype`. + +#### Small deployment with a single tier + +To get started running a personal deployment of Pega on kubernetes, you can handle all processing on a single tier. This configuration provides the most resource utilization efficiency when the characteristics of a production deployment are not necessary. The [values-small.yaml](relative-link-here) configuration provides a starting point for this simple model. + +Tier Name | Description +--- | --- +pega | One tier handles all foreground and background processing and is given a `nodeType` of "WebUser,BackgroundProcessing,search,Stream". + +#### Large deployment for production isolation of processing + +To run a larger scale Pega deployment in production, you can split additional processing out to dedicated tiers. The [values-large.yaml](relative-link-here) configuration provides an example of a multi-tier deployment that Pega recommends as a good starting point for larger deployments. + +Tier Name | Description +--- | --- +web | Interactive, foreground processing nodes that are exposed to the load balancer. Pega recommends that these nodes use the node classification “WebUser” `nodetype`. 
+batch | Background processing nodes which handle some of the non-interactive processing. Pega recommends that these nodes use the node classification “BackgroundProcessing,Search,Batch” `nodetype`. These nodes should not be exposed to the load balancer. +stream | Nodes that run an embedded deployment of Kafka and are exposed to the load balancer. Pega recommends that these nodes use the node classification “Stream” `nodetype`. +bix | Nodes dedicated to BIX processing can be helpful when the BIX workload has unique deployment or scaling characteristics. Pega recommends that these nodes use the node classification “Bix” `nodetype`. These nodes should not be exposed to the load balancer. + +### Name (*Required*) + +Use the `tier` section in the helm chart to specify the name of each tier configuration in order to label a tier in your Kubernetes deployment. This becomes the name of the tier's replica set in Kubernetes. + +Example: + +```yaml +name: "web" +``` + +### nodeType (*Required*) + +Node classification is the process of separating nodes by purpose, predefining their behavior by assigning node types. When you associate a work resource with a specific node type, you optimize work performance in your Pega application. For more information, see +[Node classification](https://community.pega.com/sites/default/files/help_v83/procomhelpmain.htm#engine/node-classification/eng-node-classification-con.htm). + +Specify the list of Pega node types for this deployment. For more information about valid node types, see the Pega Community article on [Node Classification]. + +[Node types for client-managed cloud environments](http://doc-build02.rpega.com/docs-oxygen/procomhelpmain.htm#engine/node-classification/eng-node-types-client-managed-cloud-ref.htm) + +Example: + +```yaml +nodeType: ["WebUser","bix"] +``` + +### service (*Optional*) + +Specify that the Kubernetes service block is exposed to other Kubernetes run services, or externally to systems outside the environment. 
The name of the service will be based on the tier's name, so if your tier is `"web"`, your service name will be `"pega-web"`. If you omit `service`, no Kubernetes service object is created for the tier during the deployment. For more information on services, see the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/). + +Configuration parameters: + +- `domain` - specify a domain on your network in which you create an ingress to the service. If not specified, no ingress is created. + +- `port` and `targetPort` - specify values other than the web node defaults of `80` and `8080`, respectively, if required for your networking domain. You can use these settings for external access to the stream tier when required. + +- `alb_stickiness_lb_cookie_duration_seconds` - when deploying on Amazon EKS, configure alb cookie duration seconds equal to passivation time of requestors. By default this is `3660`, or just over one hour. + +Example: + +```yaml +service: + domain: "tier.example.com" + port: 1234 + targetPort: 1234 +``` + + +### Managing Resources + +You can optionally configure the resource allocation and limits for a tier using the following parameters. The default value is used if you do not specify an alternative value. See [Managing Kubernetes Resources] for more information about how Kubernetes manages resources. + +Parameter | Description | Default value +--- | --- | --- +`replicas` | Specify the number of Pods to deploy in the tier. | `1` +`cpuRequest` | Initial CPU request for pods in the current tier. | `200m` +`cpuLimit` | CPU limit for pods in the current tier. | `2` +`memRequest` | Initial memory request for pods in the current tier. | `6Gi` +`memLimit` | Memory limit for pods in the current tier. | `8Gi` +`initialHeap` | This specifies the initial heap size of the JVM. | `4096m` +`maxHeap` | This specifies the maximum heap size of the JVM. 
| `7168m` + +### Using a Kubernetes Horizontal Pod Autoscaler (HPA) + +You may configure an HPA to scale your tier on a specified metric. Only tiers that do not use volume claims are scalable with an HPA. Set `hpa.enabled` to `true` in order to deploy an HPA for the tier. For more details, see the [Kubernetes HPA documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +Parameter | Description | Default value +--- | --- | --- +`hpa.minReplicas` | Minimum number of replicas that HPA can scale-down | `1` +`hpa.maxReplicas` | Maximum number of replicas that HPA can scale-up | `5` +`hpa.targetAverageCPUUtilization` | Threshold value for scaling based on initial CPU request utilization (The default value is `700` which corresponds to 700% of 200m ) | `700` +`hpa.targetAverageMemoryUtilization` | Threshold value for scaling based on initial memory utilization (The default value is `85` which corresponds to 85% of 6Gi ) | `85` + +### Pega configuration files + +While default configuration files are included by default, the Helm charts provide extension points to override them with additional customizations. To change the configuration file, specify a relative path to a local implementation to be injected into a ConfigMap. + +Parameter | Description | Default value +--- | --- | --- +`prconfigPath` | The location of a [prconfig.xml](config/deploy/prconfig.xml) template. | `config/prconfig.xml` +`prlog4j2Path` | The location of a [prlog4j2.xml](config/deploy/prlog4j2.xml) template. | `config/prlog4j2.xml` + +### Pega diagnostic user + +While most cloud native deployments will take advantage of aggregated logging using a tool such as EFK, there may be a need to access the logs from Tomcat directly. In the event of a need to download the logs from tomcat, a username and password will be required. You may set `pegaDiagnosticUser` and `pegaDiagnosticPassword` to set up authentication for Tomcat. 
+ + +## Cassandra and DDS deployment + +If you are planning to use Cassandra (usually as a part of Pega Decisioning), you may either point to an existing deployment or deploy a new instance along with Pega. + +### Using an existing Cassandra deployment + +To use an existing Cassandra deployment, set `cassandra.enabled` to `false` and configure the `dds` section to reference your deployment. + +Example: + +```yaml +cassandra: + enabled: false + +dds: + externalNodes: "CASSANDRA_NODE_IPS" + port: "9042" + username: "cassandra_username" + password: "cassandra_password" +``` + +### Deploying Cassandra with Pega + +You may deploy a Cassandra instance along with Pega. Cassandra is a seperate technology and needs to be independently managed. When deploying Cassandra, set `cassandra.enabled` to `true` and leave the `dds` section as-is. For more information about configuring Cassandra, see the [Cassandra Helm charts](https://github.com/helm/charts/blob/master/incubator/cassandra/values.yaml). + +*Cassandra minimum resource requirements* + +Deployment | CPU | Memory +--- | --- | --- +Development | 2 cores | 4Gi +Production | 4 cores | 8Gi + +Example: + +```yaml +cassandra: + enabled: true + # Set any additional Cassandra parameters. These values will be used by Cassandra's helm chart. + persistence: + enabled: true + resources: + requests: + memory: "4Gi" + cpu: 2 + limits: + memory: "8Gi" + cpu: 4 + +dds: + externalNodes: "" + port: "9042" + username: "dnode_ext" + password: "dnode_ext" +``` + +## Search deployment + +Use the `pegasearch` section to configure a deployment of ElasticSearch for searching Rules and Work within Pega. This deployment is used exclusively for Pega search, and is not the same ElasticSearch deployment used by the EFK stack or any other dedicated service such as Pega BI. + +Set the `pegasearch.image` location to a registry that can access the Pega search Docker image. 
The image is [available on DockerHub](https://hub.docker.com/r/pegasystems/search), and you may choose to mirror it in a private Docker repository. + +Example: + +```yaml +pegasearch: + image: "pegasystems/search:8.3" +``` diff --git a/charts/pega/charts/installer/Chart.yaml b/charts/pega/charts/installer/Chart.yaml new file mode 100644 index 000000000..f7d985c82 --- /dev/null +++ b/charts/pega/charts/installer/Chart.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +appVersion: "1.0" +description: Helm chart for Pega installation on kubernetes +name: installer +version: 1.2.0 diff --git a/charts/pega/charts/installer/config/DB2SiteDependent.properties b/charts/pega/charts/installer/config/DB2SiteDependent.properties new file mode 100644 index 000000000..d3d7be507 --- /dev/null +++ b/charts/pega/charts/installer/config/DB2SiteDependent.properties @@ -0,0 +1,25 @@ +#BLBBFP=BP32K1 +#BLBLCK=LOB +#BLBLCM=0 +#BLBLOG=YES +#BLBPRI=14400 +#BLBSEC=7200 +#BLBSTG=Blob.storage.group.name +## +#IDXBP=BP2 +#IDXPRI=7200 +#IDXSEC=7200 +#IDXSTG=Index.storage.group.name +## +#TSPLCK=PAGE +#TSPLCM=0 +#TSPPRI=14400 +#TSPSEC=7200 +#TSPSTG=Table.storage.group.name +#TS32BP=BP32K +# +#CCSID=EBCDIC +#DBNAME=PEGDB +#DBOWNR=PEGDBDBO +#DBUSER=PEGDB +#WLMUDF= diff --git a/charts/pega/charts/installer/config/db2zos.conf b/charts/pega/charts/installer/config/db2zos.conf new file mode 100644 index 000000000..9b31d91a2 --- /dev/null +++ b/charts/pega/charts/installer/config/db2zos.conf @@ -0,0 +1,8 @@ +# Add any special properties for your Database configuration +currentSchema= +currentSQLID= +currentFunctionPath=SYSIBM,SYSFUN +progressiveStreaming=2 +fullyMaterializeLobData=true +defaultIsolationLevel=1 +useJDBC4ColumnNameAndLabelSemantics=2 \ No newline at end of file diff --git a/charts/pega/charts/installer/config/migrateSystem.properties.tmpl b/charts/pega/charts/installer/config/migrateSystem.properties.tmpl new file mode 100644 index 000000000..810e9076c --- /dev/null +++ 
b/charts/pega/charts/installer/config/migrateSystem.properties.tmpl @@ -0,0 +1,82 @@ +# Properties File for use with migrateSystem.xml Update this file +# before using migrate.bat/sh script. +# Set the DB connection + +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## + +#The system where the tables/rules will be migrated from +pega.source.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.source.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.source.database.type={{ .Env.DB_TYPE }} +pega.source.jdbc.url={{ .Env.JDBC_URL }} +pega.source.jdbc.username={{ .Env.DB_USERNAME }} +pega.source.jdbc.password={{ .Env.DB_PASSWORD }} +#Custom connection properties +pega.source.jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +pega.source.rules.schema={{ .Env.RULES_SCHEMA }} +#Set the following property if the source system already contains a split schema. +pega.source.data.schema={{ .Env.DATA_SCHEMA }} +# Used for systems with a separate Customer Data Schema +# The value of pega.source.data is the default value for pega.source.customerdata.schema +pega.source.customerdata.schema={{ .Env.CUSTOMERDATA_SCHEMA }} + +#The system where the tables/rules will be migrated to +pega.target.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.target.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.target.database.type={{ .Env.DB_TYPE }} +pega.target.jdbc.url={{ .Env.JDBC_URL }} +pega.target.jdbc.username={{ .Env.DB_USERNAME }} +pega.target.jdbc.password={{ .Env.DB_PASSWORD }} +#Custom connection properties +pega.target.jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +pega.target.rules.schema={{ .Env.TARGET_RULES_SCHEMA }} +#Used to correctly schema qualify tables in stored procedures, views and triggers. +#This property is not required if migrating before performing an upgrade. 
+pega.target.data.schema={{ .Env.TARGET_DATA_SCHEMA }} +# Used for systems with a separate Customer Data Schema +# The value of pega.target.data is the default value for pega.target.customerdata.schema +pega.target.customerdata.schema={{ .Env.TARGET_CUSTOMERDATA_SCHEMA }} + +#Set this property to bypass udf generation on the target system. +pega.target.bypass.udf={{ .Env.BYPASS_UDF_GENERATION }} + +#The location of the db2zos site specific properties file. Only used if the target system is a db2zos database. +pega.target.zos.properties=config/db2zos/DB2SiteDependent.properties + +#The commit count to use when loading database tables +db.load.commit.rate={{ .Env.MIGRATION_DB_LOAD_COMMIT_RATE }} + +################### Migrate System Properties ########################################### +#The directory where output from the bulk mover will be stored. This directory will be cleared when pega.bulkmover.unload.db is run. +#This property must be set if either pega.bulkmover.unload.db or pega.bulkmover.load.db is set to true. +pega.bulkmover.directory=/opt/pega/kit/scripts/upgrade/mover + +#The location where a temporary directory will be created for use by the migrate system utilities. +pega.migrate.temp.directory=/opt/pega/kit/scripts/upgrade/migrate + + +######## The operations to be run by the utility, they will only be run if the property is set to true. +#Set to true if migrating before an upgrade. If true admin table(s) required +#for an upgrade will be migrated with the rules tables. +pega.move.admin.table={{ .Env.MOVE_ADMIN_TABLE }} +#Generate an xml document containing the definitions of tables in the source system. It will be found in the schema directory of the +#distribution image. +pega.clone.generate.xml={{ .Env.CLONE_GENERATE_XML }} +#Create ddl from the generated xml document. This ddl can be used to create copies of rule tables found on the source system. 
+pega.clone.create.ddl={{ .Env.CLONE_CREATE_DDL }} +#Apply the generated clone ddl to the target system. +pega.clone.apply.ddl={{ .Env.CLONE_APPLY_DDL }} +#Unload the rows from the rules tables on the source system into the pega.bulkmover.directory. +pega.bulkmover.unload.db={{ .Env.BULKMOVER_UNLOAD_DB }} +#Load the rows onto the target system from the pega.bulkmover.directory. +pega.bulkmover.load.db={{ .Env.BULKMOVER_LOAD_DB }} + +### The following operations should only be run when migrating upgraded rules +#Generate the rules schema objects (views, triggers, procedures, functions). The objects will be created in the pega.target.rules.schema +#but will contain references to the pega.target.data.schema where appropriate. +pega.rules.objects.generate={{ .Env.RULES_OBJECTS_GENERATE }} +#Apply the rules schema objects (views, triggers, procedures, functions) to pega.target.rules.schema. +pega.rules.objects.apply={{ .Env.RULES_OBJECTS_APPLY }} \ No newline at end of file diff --git a/charts/pega/charts/installer/config/mssql.conf b/charts/pega/charts/installer/config/mssql.conf new file mode 100644 index 000000000..2657958d0 --- /dev/null +++ b/charts/pega/charts/installer/config/mssql.conf @@ -0,0 +1 @@ +# Add any special properties for your Database configuration diff --git a/charts/pega/charts/installer/config/oracledate.conf b/charts/pega/charts/installer/config/oracledate.conf new file mode 100644 index 000000000..d69420838 --- /dev/null +++ b/charts/pega/charts/installer/config/oracledate.conf @@ -0,0 +1,2 @@ +# Add any special properties for your Database configuration +oracle.jdbc.V8Compatible=true \ No newline at end of file diff --git a/charts/pega/charts/installer/config/postgres.conf b/charts/pega/charts/installer/config/postgres.conf new file mode 100644 index 000000000..005138a90 --- /dev/null +++ b/charts/pega/charts/installer/config/postgres.conf @@ -0,0 +1 @@ +# Add any special properties for your Database configuration \ No newline at end of file 
diff --git a/charts/pega/charts/installer/config/prbootstrap.properties.tmpl b/charts/pega/charts/installer/config/prbootstrap.properties.tmpl new file mode 100644 index 000000000..e5cdcce26 --- /dev/null +++ b/charts/pega/charts/installer/config/prbootstrap.properties.tmpl @@ -0,0 +1,19 @@ +install.{{ .Env.DB_TYPE }}.schema={{ .Env.DATA_SCHEMA }} +initialization.settingsource=file +com.pega.pegarules.priv.LogHelper.USE_LOG4JV2=true +maxIdle={{ .Env.MAX_IDLE }} +com.pega.pegarules.bootstrap.engineclasses.tablename={{ .Env.RULES_SCHEMA }}.pr_engineclasses +install.{{ .Env.DB_TYPE }}.rulesSchema={{ .Env.RULES_SCHEMA }} +maxWait={{ .Env.MAX_WAIT }} +install.{{ .Env.DB_TYPE }}.url={{ .Env.JDBC_URL }} +maxActive={{ .Env.MAX_ACTIVE }} +install.{{ .Env.DB_TYPE }}.username={{ .Env.DB_USERNAME }} +{{ .Env.DB_TYPE }}.jdbc.class={{ .Env.JDBC_CLASS }} +com.pega.pegarules.bootstrap.assembledclasses.tablename={{ .Env.RULES_SCHEMA }}.pr_assembledclasses +com.pega.pegarules.bootstrap.assembledclasses.dbcpsource=install.{{ .Env.DB_TYPE }} +com.pega.pegarules.bootstrap.tempdir=/opt/pega/temp +poolPreparedStatements=true +install.{{ .Env.DB_TYPE }}.connectionProperties={{ .Env.JDBC_CUSTOM_CONNECTION }} +install.{{ .Env.DB_TYPE }}.password={{ .Env.DB_PASSWORD }} +com.pega.pegarules.bootstrap.codeset.version.Pega-EngineCode={{ .Env.CODESET_VERSION }} +com.pega.pegarules.bootstrap.engineclasses.dbcpsource=install.{{ .Env.DB_TYPE }} diff --git a/charts/pega/charts/installer/config/prconfig.xml.tmpl b/charts/pega/charts/installer/config/prconfig.xml.tmpl new file mode 100644 index 000000000..47852ea42 --- /dev/null +++ b/charts/pega/charts/installer/config/prconfig.xml.tmpl @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/charts/pega/charts/installer/config/prlog4j2.xml b/charts/pega/charts/installer/config/prlog4j2.xml new file mode 100644 index 000000000..e349a1dae --- /dev/null +++ 
b/charts/pega/charts/installer/config/prlog4j2.xml @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{tenantid}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + + %8r [%t] %-5p %c - %m%n + + + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/charts/pega/charts/installer/config/prpcUtils.properties.tmpl b/charts/pega/charts/installer/config/prpcUtils.properties.tmpl new file mode 100644 index 000000000..135efe68e --- /dev/null +++ b/charts/pega/charts/installer/config/prpcUtils.properties.tmpl @@ -0,0 +1,32 @@ +# Properties file for use with PRPC Utilities. + +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## +# CONNECTION INFORMATION +pega.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.database.type={{ .Env.DB_TYPE }} +pega.jdbc.url={{ .Env.JDBC_URL }} +pega.jdbc.username={{ .Env.DB_USERNAME }} +pega.jdbc.password={{ .Env.DB_PASSWORD }} + +# CUSTOM CONNECTION PROPERTIES +jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +# RULES SCHEMA NAME +rules.schema.name={{ .Env.RULES_SCHEMA }} + +# DATA SCHEMA NAME +data.schema.name={{ .Env.DATA_SCHEMA }} + +# CUSTOMER DATA SCHEMA NAME +customerdata.schema.name={{ .Env.CUSTOMERDATA_SCHEMA }} + +# USER TEMP DIRECTORY +# Will use default if not set to valid directory +user.temp.dir=/opt/pega/temp + +############################### SETTINGS FOR CHANGING DYNAMIC SYSTEM SETTINGS ######## 
+###################################################################################### +dass.filepath=/opt/pega/kit/scripts/upgrade_dass_settings.json +pega.codeset.version={{ .Env.ENGINE_CODESET_VERSION }} diff --git a/charts/pega/charts/installer/config/setupDatabase.properties.tmpl b/charts/pega/charts/installer/config/setupDatabase.properties.tmpl new file mode 100644 index 000000000..36f0db94d --- /dev/null +++ b/charts/pega/charts/installer/config/setupDatabase.properties.tmpl @@ -0,0 +1,74 @@ +# Properties file for use with Pega Deployment Utilities. +# For more information, see the Pega Platform help. + +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## + +# CONNECTION INFORMATION +pega.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.database.type={{ .Env.DB_TYPE }} +pega.jdbc.url={{ .Env.JDBC_URL }} +pega.jdbc.username={{ .Env.DB_USERNAME }} +pega.jdbc.password={{ .Env.DB_PASSWORD }} + +pega.admin.password={{ .Env.ADMIN_PASSWORD }} + +jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +# RULES SCHEMA NAME +rules.schema.name={{ .Env.RULES_SCHEMA }} + +# DATA SCHEMA NAME +data.schema.name={{ .Env.DATA_SCHEMA }} + +# CUSTOMER DATA SCHEMA NAME +customerdata.schema.name={{ .Env.CUSTOMERDATA_SCHEMA }} + +# USER TEMP DIRECTORY +# Will use default if not set to valid directory +user.temp.dir=/opt/pega/temp + +# z/OS SITE-SPECIFIC PROPERTIES FILE +pega.zos.properties={{ .Env.ZOS_PROPERTIES }} + +# BYPASS UDF GENERATION? +bypass.udf.generation={{ .Env.BYPASS_UDF_GENERATION }} + +# BYPASS AUTOMATICALLY TRUNCATING PR_SYS_UPDATESCACHE? 
+bypass.truncate.updatescache={{ .Env.BYPASS_TRUNCATE_UPDATESCACHE }} + +# REBUILD DATABASE RULES INDEXES +rebuild.indexes={{ .Env.REBUILD_INDEXES }} + +# SYSTEM NAME +system.name={{ .Env.SYSTEM_NAME }} + +# PRODUCTION LEVEL +production.level={{ .Env.PRODUCTION_LEVEL }} + +# MULTITENANT SYSTEM? +# A multitenant system allows organizations to act as separate Pega Platform installations +multitenant.system={{ .Env.MT_SYSTEM }} + +# UPDATE EXISTING APPLICATIONS +update.existing.applications={{ .Env.UPDATE_EXISTING_APPLICATIONS }} + +# UPDATE APPLICATIONS SCHEMA +update.applications.schema={{ .Env.UPDATE_APPLICATIONS_SCHEMA }} + +# WORKLOAD MANAGER +db2zos.udf.wlm={{ .Env.DB2_ZOS_UDF_WLM }} + +# RUN RULESET CLEANUP? +run.ruleset.cleanup={{ .Env.RUN_RULESET_CLEANUP }} + +# CUSTOM CONFIGURATION PROPERTIES FILE +# The congfiguration files are dockerized using .tmpl files and are stored in opt/pega/config +# inside the container. +pegarules.config=/opt/pega/kit/scripts/prconfig.xml +prbootstrap.config=/opt/pega/kit/scripts/prbootstrap.properties +prlogging.config=/opt/pega/kit/scripts/prlog4j2.xml + +# Create schema if absent flag - Only from Docker related deployments +pega.schema.autocreate=true \ No newline at end of file diff --git a/charts/pega/charts/installer/config/udb.conf b/charts/pega/charts/installer/config/udb.conf new file mode 100644 index 000000000..83224878b --- /dev/null +++ b/charts/pega/charts/installer/config/udb.conf @@ -0,0 +1,3 @@ +# Add any special properties for your Database configuration +currentSchema= +currentFunctionPath=SYSIBM,SYSFUN \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/_helpers.tpl b/charts/pega/charts/installer/templates/_helpers.tpl new file mode 100644 index 000000000..f6f75d6d7 --- /dev/null +++ b/charts/pega/charts/installer/templates/_helpers.tpl @@ -0,0 +1,93 @@ +{{- define "pegaVolumeInstall" }}pega-volume-installer{{- end }} +{{- define "pegaInstallConfig" }}pega-installer-config{{- end }} 
+{{- define "pegaDBInstall" -}}pega-db-install{{- end -}} +{{- define "pegaDBUpgrade" -}}pega-db-upgrade{{- end -}} +{{- define "installerConfig" -}}installer-config{{- end -}} +{{- define "installerJobReaderRole" -}}jobs-reader{{- end -}} +{{- define "pegaPreDBUpgrade" -}}pega-pre-upgrade{{- end -}} +{{- define "pegaPostDBUpgrade" -}}pega-post-upgrade{{- end -}} +{{- define "pegaInstallEnvironmentConfig" -}}pega-install-environment-config{{- end -}} +{{- define "pegaUpgradeEnvironmentConfig" -}}pega-upgrade-environment-config{{- end -}} +{{- define "pegaDistributionKitVolume" -}}pega-distribution-kit-volume{{- end -}} + +{{- define "performInstall" }} + {{- if or (eq .Values.global.actions.execute "install") (eq .Values.global.actions.execute "install-deploy") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "performUpgrade" }} + {{- if or (eq .Values.global.actions.execute "upgrade") (eq .Values.global.actions.execute "upgrade-deploy") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "performOnlyUpgrade" }} + {{- if (eq .Values.global.actions.execute "upgrade") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "waitForPegaDBInstall" -}} +- name: wait-for-pegainstall + image: dcasavant/k8s-wait-for + args: [ 'job', '{{ template "pegaDBInstall" }}'] +{{- end }} + +{{- define "waitForPegaDBUpgrade" -}} +- name: wait-for-pegaupgrade + image: dcasavant/k8s-wait-for + args: [ 'job', '{{ template "pegaDBUpgrade" }}'] +{{- include "initContainerEnvs" $ }} +{{- end }} + +{{- define "waitForPreDBUpgrade" -}} +- name: wait-for-pre-dbupgrade + image: dcasavant/k8s-wait-for + args: [ 'job', '{{ template "pegaPreDBUpgrade" }}'] +{{- end }} + +{{- define "waitForRollingUpdates" -}} +{{- $rolloutCommand := "" }} +{{- $kindName := "" }} +{{- $lastIndex := sub (len .Values.global.tier) 1 }} +{{- $namespace := .Release.Namespace }} +{{- range $index, $dep := .Values.global.tier }} +{{- if 
($dep.volumeClaimTemplate) }} +{{- $kindName = "statefulset" }} +{{- else -}} +{{- $kindName = "deployment" }} +{{- end }} +{{- $constructCommand := cat "kubectl rollout status" $kindName "/" "pega-" $dep.name "--namespace" $namespace }} +{{- if ne $index $lastIndex }} +{{- $rolloutCommand = cat $rolloutCommand $constructCommand "&&" }} +{{- else }} +{{- $rolloutCommand = cat $rolloutCommand $constructCommand }} +{{- end }} +{{- $rolloutCommand = regexReplaceAllLiteral " / " $rolloutCommand "/" }} +{{- $rolloutCommand = regexReplaceAllLiteral "pega- " $rolloutCommand "pega-" }} +{{- end -}} +- name: wait-for-rolling-updates + image: dcasavant/k8s-wait-for + command: ['sh', '-c', '{{ $rolloutCommand }}' ] +{{- include "initContainerEnvs" $ }} +{{- end }} + +{{- define "initContainerEnvs" -}} +{{- if or (eq .Values.global.provider "aks") (eq .Values.global.provider "pks") -}} +{{ $apiserver := index .Values.global.upgrade "kube-apiserver" }} + env: + - name: KUBERNETES_SERVICE_HOST + value: {{ $apiserver.serviceHost | quote }} + - name: KUBERNETES_SERVICE_PORT_HTTPS + value: {{ $apiserver.httpsServicePort | quote }} + - name: KUBERNETES_SERVICE_PORT + value: {{ $apiserver.httpsServicePort | quote }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/_pega-installer-config.tpl b/charts/pega/charts/installer/templates/_pega-installer-config.tpl new file mode 100644 index 000000000..93997c53d --- /dev/null +++ b/charts/pega/charts/installer/templates/_pega-installer-config.tpl @@ -0,0 +1,100 @@ +{{- define "pega.installer.config" -}} +{{- $arg := .mode -}} +# Node type specific configuration for {{ .name }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .name }} + namespace: {{ .root.Release.Namespace }} +data: +# Start of Pega Installer Configurations + +{{ if eq $arg "installer-config" }} + +{{- $prconfigTemplatePath := "config/prconfig.xml.tmpl" }} +{{- $setupDatabasePath := 
"config/setupDatabase.properties" }} +{{- $setupDatabasetemplatePath := "config/setupDatabase.properties.tmpl" }} +{{- $prbootstraptemplatePath := "config/prbootstrap.properties.tmpl" }} +{{- $prpcUtilsPropertiestemplatePath := "config/prpcUtils.properties.tmpl" }} +{{- $migrateSystempropertiestemplatePath := "config/migrateSystem.properties.tmpl" }} + +{{ if $prconfigTemplate := .root.Files.Glob $prconfigTemplatePath }} + # prconfigTemplate to be used by {{ .name }} + prconfig.xml.tmpl: |- +{{ .root.Files.Get $prconfigTemplatePath | indent 6 }} +{{- end }} + +{{ if $setupDatabase := .root.Files.Glob $setupDatabasePath }} + # setupDatabase to be used by {{ .name }} + setupDatabase.properties: |- +{{ .root.Files.Get $setupDatabasePath | indent 6 }} +{{- end }} + +{{ if $setupDatabasetemplate := .root.Files.Glob $setupDatabasetemplatePath }} + # setupDatabasetemplate to be used by {{ .name }} + setupDatabase.properties.tmpl: |- +{{ .root.Files.Get $setupDatabasetemplatePath | indent 6 }} +{{- end }} + +{{ if $prbootstraptemplate := .root.Files.Glob $prbootstraptemplatePath }} + # prbootstraptemplate to be used by {{ .name }} + prbootstrap.properties.tmpl: |- +{{ .root.Files.Get $prbootstraptemplatePath | indent 6 }} +{{- end }} + +{{ if $prpcUtilsPropertiestemplate := .root.Files.Glob $prpcUtilsPropertiestemplatePath }} + # prpcUtilsPropertiestemplate to be used by {{ .name }} + prpcUtils.properties.tmpl: |- +{{ .root.Files.Get $prpcUtilsPropertiestemplatePath | indent 6 }} +{{- end }} + +{{ if $migrateSystempropertiestemplate := .root.Files.Glob $migrateSystempropertiestemplatePath }} + # migrateSystempropertiestemplate to be used by {{ .name }} + migrateSystem.properties.tmpl: |- +{{ .root.Files.Get $migrateSystempropertiestemplatePath | indent 6 }} +{{- end }} + +{{- $prlog4j2Path := "config/prlog4j2.xml" }} + # prlog4j2 file to be used by {{ .name }} + prlog4j2.xml: |- +{{ .root.Files.Get $prlog4j2Path | indent 6 }} + +{{- $dbType := .dbType }} +{{- 
$postgresConfPath := "config/postgres/postgres.conf" }} +{{- $oracledateConfPath := "config/oracledate/oracledate.conf" }} +{{- $db2zosConfPath := "config/db2zos/db2zos.conf" }} +{{- $mssqlConfPath := "config/mssql/mssql.conf" }} +{{- $udbConfPath := "config/udb/udb.conf" }} +{{- $zosPropertiesPath := "config/db2zos/DB2SiteDependent.properties" }} + +{{ if and (eq $dbType "postgres") ( $postgresConf := .root.Files.Glob $postgresConfPath ) }} + postgres.conf: |- +{{ .root.Files.Get $postgresConfPath | indent 6 }} +{{- end }} + +{{ if and (eq $dbType "oracledate") ( $oracledateConf := .root.Files.Glob $oracledateConfPath ) }} + oracledate.conf: |- +{{ .root.Files.Get $oracledateConfPath | indent 6 }} +{{- end }} + +{{ if and (eq $dbType "mssql") ( $mssqlConf := .root.Files.Glob $mssqlConfPath ) }} + mssql.conf: |- +{{ .root.Files.Get $mssqlConfPath | indent 6 }} +{{- end }} + +{{ if and (eq $dbType "db2zos") ( $db2zosConf := .root.Files.Glob $db2zosConfPath ) ( $db2zosProperties := .root.Files.Glob $zosPropertiesPath ) }} + db2zos.conf: |- +{{ .root.Files.Get $db2zosConfPath | indent 6 }} + DB2SiteDependent.properties: |- +{{ .root.Files.Get $zosPropertiesPath | indent 6 }} +{{- end }} + +{{ if and (eq $dbType "udb") ( $udbConf := .root.Files.Glob $udbConfPath ) }} + udb.conf: |- +{{ .root.Files.Get $udbConfPath | indent 6 }} +{{- end }} + +{{- end }} +# End of Pega Installer Configurations +{{- end }} + diff --git a/charts/pega/charts/installer/templates/_pega-installer-job.tpl b/charts/pega/charts/installer/templates/_pega-installer-job.tpl new file mode 100644 index 000000000..a1dc29197 --- /dev/null +++ b/charts/pega/charts/installer/templates/_pega-installer-job.tpl @@ -0,0 +1,74 @@ +{{- define "pega.installer" -}} +{{- $arg := .action -}} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ .name }} + namespace: {{ .root.Release.Namespace }} +spec: + backoffLimit: 0 + template: + spec: + volumes: +{{- if and .root.Values.distributionKitVolumeClaimName (not 
.root.Values.distributionKitURL) }} + - name: {{ template "pegaDistributionKitVolume" }} + persistentVolumeClaim: + claimName: {{ .root.Values.distributionKitVolumeClaimName }} +{{- end }} + - name: {{ template "pegaVolumeCredentials" }} + secret: + # This name will be referred in the volume mounts kind. + secretName: {{ template "pegaCredentialsSecret" }} + # Used to specify permissions on files within the volume. + defaultMode: 420 + - name: {{ template "pegaVolumeInstall" }} + configMap: + # This name will be referred in the volume mounts kind. + name: {{ template "pegaInstallConfig"}} + # Used to specify permissions on files within the volume. + defaultMode: 420 + initContainers: +{{- range $i, $val := .initContainers }} +{{ include $val $.root | indent 6 }} +{{- end }} + containers: + - name: {{ .name }} + image: {{ .root.Values.image }} + ports: + - containerPort: 8080 + resources: + # CPU and Memory that the containers for {{ .name }} request + requests: + cpu: "{{ .root.Values.resources.requests.cpu }}" + memory: "{{ .root.Values.resources.requests.memory }}" + limits: + cpu: "{{ .root.Values.resources.limits.cpu }}" + memory: "{{ .root.Values.resources.limits.memory }}" + volumeMounts: + # The given mountpath is mapped to volume with the specified name. The config map files are mounted here. 
+ - name: {{ template "pegaVolumeInstall" }} + mountPath: "/opt/pega/config" + - name: {{ template "pegaVolumeCredentials" }} + mountPath: "/opt/pega/secrets" +{{- if and .root.Values.distributionKitVolumeClaimName (not .root.Values.distributionKitURL) }} + - name: {{ template "pegaDistributionKitVolume" }} + mountPath: "/opt/pega/mount/kit" +{{- end }} +{{- if or (eq $arg "pre-upgrade") (eq $arg "post-upgrade") (eq $arg "upgrade") }} + env: + - name: ACTION + value: {{ .action }} + envFrom: + - configMapRef: + name: {{ template "pegaUpgradeEnvironmentConfig" }} +{{- end }} +{{- if (eq $arg "install") }} + envFrom: + - configMapRef: + name: {{ template "pegaInstallEnvironmentConfig" }} +{{- end }} + restartPolicy: Never + imagePullSecrets: + - name: {{ template "pegaRegistrySecret" }} +--- +{{- end -}} diff --git a/charts/pega/charts/installer/templates/pega-install-environment-config.yaml b/charts/pega/charts/installer/templates/pega-install-environment-config.yaml new file mode 100644 index 000000000..41a36adce --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-install-environment-config.yaml @@ -0,0 +1,55 @@ +{{ if (eq (include "performInstall" .) 
"true") -}} +# Config map used for Pega Installation +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "pegaInstallEnvironmentConfig" }} + namespace: {{ .Release.Namespace }} +data: + # Database Type for installation + DB_TYPE: {{ .Values.global.jdbc.dbType }} + # JDBC URL of the DB where Pega is installed + JDBC_URL: {{ .Values.global.jdbc.url }} + # Class name of the DB's JDBC driver + JDBC_CLASS: {{ .Values.global.jdbc.driverClass }} + # URI that the JDBC driver can be downloaded from + JDBC_DRIVER_URI: {{ .Values.global.jdbc.driverUri }} + # Rules schema of the Pega installation + RULES_SCHEMA: {{ .Values.global.jdbc.rulesSchema }} + # Data schema of the Pega installation + DATA_SCHEMA: {{ .Values.global.jdbc.dataSchema }} + # CustomerData schema of the Pega installation + CUSTOMERDATA_SCHEMA: {{ .Values.global.jdbc.customerDataSchema }} + # Creates a new System and replaces this with default system + SYSTEM_NAME: {{ .Values.systemName }} + # Creates the system with this production level + PRODUCTION_LEVEL: {{ .Values.productionLevel | quote}} + # Whether this is a Multitenant System ('true' if yes, 'false' if no) + MULTITENANT_SYSTEM: {{ .Values.multitenantSystem | quote}} + # Temporary password for administrator@pega.com that is used to install Pega Platform + ADMIN_PASSWORD: {{ .Values.adminPassword }} + # Run the Static Assembler ('true' to run, 'false' to not run) + STATIC_ASSEMBLER: {{ .Values.assembler | quote}} + # UDF generation will be skipped if this property is set to true + BYPASS_UDF_GENERATION: {{ .Values.bypassUdfGeneration | quote}} + # Bypass automatically truncating PR_SYS_UPDATESCACHE . Default is false. 
+ BYPASS_TRUNCATE_UPDATESCACHE: {{ .Values.bypassTruncateUpdatescache | quote }} + # JDBC custom connection properties + JDBC_CUSTOM_CONNECTION: {{ .Values.jdbcCustomConnection }} + # Maximum Idle Thread.Default is 5 + MAX_IDLE: {{ .Values.threads.maxIdle | quote }} + # Maximum Thread Wait.Default is -1 + MAX_WAIT: {{ .Values.threads.maxWait | quote }} + # Maximum Active Thread.Default is 10 + MAX_ACTIVE: {{ .Values.threads.maxActive | quote }} + # Z/OS SITE-SPECIFIC PROPERTIES FILE + ZOS_PROPERTIES: {{ .Values.zos.zosProperties }} + # Specify the workload manager to load UDFs into db2zos + DB2ZOS_UDF_WLM: {{ .Values.zos.db2zosUdfWlm}} + # Specify Database Type + DB_TYPE: {{ .Values.global.jdbc.dbType}} + # Action to be performed by installer image + ACTION: {{ .Values.global.actions.execute }} + # Distribution kit URL + DISTRIBUTION_KIT_URL: {{ .Values.distributionKitURL }} +{{ end }} diff --git a/charts/pega/charts/installer/templates/pega-installer-action-validate.yaml b/charts/pega/charts/installer/templates/pega-installer-action-validate.yaml new file mode 100644 index 000000000..087eb1456 --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-installer-action-validate.yaml @@ -0,0 +1,6 @@ +{{- if (eq (include "performUpgrade" .) "true") }} +{{- $validUpgradeType := list "in-place" "out-of-place" }} +{{- if not (has .Values.upgrade.upgradeType $validUpgradeType) }} +{{- fail "Upgrade Type value is not correct. The valid values are 'in-place' 'out-of-place'" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/pega-installer-config.yaml b/charts/pega/charts/installer/templates/pega-installer-config.yaml new file mode 100644 index 000000000..7371551fe --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-installer-config.yaml @@ -0,0 +1,3 @@ +{{ if or (eq (include "performInstall" .) "true") (eq (include "performUpgrade" .) 
"true") }} +{{ template "pega.installer.config" dict "root" $ "dbType" .Values.global.jdbc.dbType "name" (include "pegaInstallConfig" .) "mode" (include "installerConfig" .) }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/pega-installer-job.yaml b/charts/pega/charts/installer/templates/pega-installer-job.yaml new file mode 100644 index 000000000..6e728e65a --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-installer-job.yaml @@ -0,0 +1,11 @@ +{{ if (eq (include "performInstall" .) "true") }} +{{ template "pega.installer" dict "root" $ "name" (include "pegaDBInstall" .) "action" "install" }} +{{ end }} +{{ if (eq (include "performOnlyUpgrade" .) "true") }} +{{ template "pega.installer" dict "root" $ "name" (include "pegaDBUpgrade" .) "action" "upgrade" }} +{{ end }} +{{ if (eq (include "performUpgradeAndDeployment" .) "true") }} +{{ template "pega.installer" dict "root" $ "name" (include "pegaPreDBUpgrade" .) "action" "pre-upgrade" }} +{{ template "pega.installer" dict "root" $ "name" (include "pegaDBUpgrade" .) "action" "upgrade" "initContainers" (list "waitForPreDBUpgrade") }} +{{ template "pega.installer" dict "root" $ "name" (include "pegaPostDBUpgrade" .) "action" "post-upgrade" "initContainers" (list "waitForPegaDBUpgrade" "waitForRollingUpdates") }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/pega-installer-role.yaml b/charts/pega/charts/installer/templates/pega-installer-role.yaml new file mode 100644 index 000000000..ace912195 --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-installer-role.yaml @@ -0,0 +1,11 @@ +{{ if or (eq (include "performInstallAndDeployment" .) "true") (eq (include "performUpgradeAndDeployment" .) 
"true") }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "installerJobReaderRole" }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: ["", "batch", "extensions", "apps"] + resources: ["jobs", "deployments", "statefulsets"] + verbs: ["get", "watch", "list"] +{{ end }} diff --git a/charts/pega/charts/installer/templates/pega-installer-status-rolebinding.yaml b/charts/pega/charts/installer/templates/pega-installer-status-rolebinding.yaml new file mode 100644 index 000000000..11963817b --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-installer-status-rolebinding.yaml @@ -0,0 +1,15 @@ +{{ if or (eq (include "performInstallAndDeployment" .) "true") (eq (include "performUpgradeAndDeployment" .) "true") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: check-installer-status + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "installerJobReaderRole" }} +subjects: +- kind: ServiceAccount + name: default + namespace: {{ .Release.Namespace }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/pega-upgrade-environment-config.yaml b/charts/pega/charts/installer/templates/pega-upgrade-environment-config.yaml new file mode 100644 index 000000000..eca590847 --- /dev/null +++ b/charts/pega/charts/installer/templates/pega-upgrade-environment-config.yaml @@ -0,0 +1,50 @@ +{{ if (eq (include "performUpgrade" .) 
"true") -}} +# Config map used for common configuration between pre-upgrades, post-upgrades and upgrades +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "pegaUpgradeEnvironmentConfig" }} + namespace: {{ .Release.Namespace }} +data: + # Database Type for installation + DB_TYPE: {{ .Values.global.jdbc.dbType }} + # JDBC URL of the DB where Pega is installed + JDBC_URL: {{ .Values.global.jdbc.url }} + # Class name of the DB's JDBC driver + JDBC_CLASS: {{ .Values.global.jdbc.driverClass }} + # URI that the JDBC driver can be downloaded from + JDBC_DRIVER_URI: {{ .Values.global.jdbc.driverUri }} + # Rules schema of the Pega installation + RULES_SCHEMA: {{ .Values.global.jdbc.rulesSchema }} + # Data schema of the Pega installation + DATA_SCHEMA: {{ .Values.global.jdbc.dataSchema }} + # CustomerData schema of the Pega installation + CUSTOMERDATA_SCHEMA: {{ .Values.global.jdbc.customerDataSchema }} + # Type of Upgrade + UPGRADE_TYPE: {{ .Values.upgrade.upgradeType }} + # Whether this is a Multitenant System ('true' if yes, 'false' if no) + MULTITENANT_SYSTEM: {{ .Values.multitenantSystem | quote}} + # UDF generation will be skipped if this property is set to true + BYPASS_UDF_GENERATION: {{ .Values.bypassUdfGeneration | quote}} + # Z/OS SITE-SPECIFIC PROPERTIES FILE + ZOS_PROPERTIES: {{ .Values.zos.zosProperties }} + # Specify the workload manager to load UDFs into db2zos + DB2ZOS_UDF_WLM: {{ .Values.zos.db2zosUdfWlm}} + # Target Rules Schema name + TARGET_RULES_SCHEMA: {{ .Values.upgrade.targetRulesSchema }} + # The location of the db2zos site specific properties file. 
Only used if the target system is a db2zos database + TARGET_ZOS_PROPERTIES: {{ .Values.zos.zosProperties }} + # The commit count to use when loading database tables + MIGRATION_DB_LOAD_COMMIT_RATE: {{ .Values.upgrade.dbLoadCommitRate | quote }} + # Update existing application will be run if this property is set to true + UPDATE_EXISTING_APPLICATIONS: {{ .Values.upgrade.updateExistingApplications | quote }} + # Runs the Update Applications Schema utility to update the cloned Rule, Data, Work and Work History tables with the schema changes in the latest base tables if this property is set to true + UPDATE_APPLICATIONS_SCHEMA: {{ .Values.upgrade.updateApplicationsSchema | quote }} + # Generate and execute an SQL script to clean old rulesets and their rules from the system if this property is set to true + RUN_RULESET_CLEANUP: {{ .Values.upgrade.runRulesetCleanup | quote }} + # Rebuild Database Rules Indexes after Rules Load to improve Database Access Performance + REBUILD_INDEXES: {{ .Values.upgrade.rebuildIndexes | quote }} + # Distribution kit URL + DISTRIBUTION_KIT_URL: {{ .Values.distributionKitURL }} +{{ end }} + \ No newline at end of file diff --git a/charts/pega/charts/installer/values.yaml b/charts/pega/charts/installer/values.yaml new file mode 100644 index 000000000..763bcea3a --- /dev/null +++ b/charts/pega/charts/installer/values.yaml @@ -0,0 +1,69 @@ +--- +image: "YOUR_IMAGE_NAME" +# Creates a new System and replaces this with default system.Default is pega +systemName: "pega" +# During installations, the system name above is generated with the following production level.Default is 2 +# The system production level can be set to one of the below integer values (1-5): +# 5 = production; +# 4 = preproduction; +# 3 = test; +# 2 = development; +# 1 = experimental +productionLevel: 2 +# Whether this is a Multitenant System ('true' if yes, 'false' if no) +multitenantSystem: "false" +# UDF generation will be skipped if this property is set to true 
+bypassUdfGeneration: "true" +# Temporary password for administrator@pega.com that is used to install Pega Platform +adminPassword: "" +# Run the Static Assembler ('true' to run, 'false' to not run) +assembler: "" +# Bypass automatically truncating PR_SYS_UPDATESCACHE . Default is false. +bypassTruncateUpdatescache: "false" +# JDBC custom connection properties +jdbcCustomConnection: "" +# Distribution kit URL +distributionKitURL: "" +# A manually managed Persistent Volume Claim for mounting distribution kit. Given PVC must be created manually before volume will be bound. +# This as an alternative to distributionKitURL. If both are specified then distributionKitURL will take precedence. +# Only distriubtion kit zip file is expected inside the volume. +distributionKitVolumeClaimName: "" +threads: + # Maximum Idle Thread.Default is 5 + maxIdle: 5 + # Maximum Wait Thread.Default is -1 + maxWait: -1 + # Maximum Active Thread.Default is 10 + maxActive: 10 +zos: + # Z/OS SITE-SPECIFIC PROPERTIES FILE + zosProperties: "/opt/pega/config/DB2SiteDependent.properties" + # Specify the workload manager to load UDFs into db2zos + db2zosUdfWlm: "" +# Upgrade specific properties +upgrade: + # Type of upgrade + # Valid values are 'in-place' , 'out-of-place' + upgradeType: "" + # Specify target rules schema for migration and upgrade + targetRulesSchema: "" + # The commit count to use when loading database tables + dbLoadCommitRate: 100 + # Update existing application will be run if this property is set to true + updateExistingApplications: "false" + # Runs the Update Applications Schema utility to update the cloned Rule, Data, Work + # And Work History tables with the schema changes in the latest base tables if this property is set to true + updateApplicationsSchema: "false" + # Generate and execute an SQL script to clean old rulesets and their rules from the system if this property is set to true + runRulesetCleanup: "false" + # Rebuild Database Rules Indexes after Rules Load to 
improve Database Access Performance + rebuildIndexes: "false" + +# Memory and CPU settings for installer +resources: + requests: + memory: "5Gi" + cpu: 1 + limits: + memory: "6Gi" + cpu: 2 diff --git a/charts/pega/charts/pegasearch/Chart.yaml b/charts/pega/charts/pegasearch/Chart.yaml new file mode 100644 index 000000000..c6354e60c --- /dev/null +++ b/charts/pega/charts/pegasearch/Chart.yaml @@ -0,0 +1,10 @@ +--- +name: pegasearch +version: "1.2.0" +description: Pega Search installation on kubernetes +keywords: +- pega +- prpc +- kubernetes +- search +home: http://www.pega.com diff --git a/charts/pega/charts/pegasearch/templates/_helpers.tpl b/charts/pega/charts/pegasearch/templates/_helpers.tpl new file mode 100644 index 000000000..0cdd7511e --- /dev/null +++ b/charts/pega/charts/pegasearch/templates/_helpers.tpl @@ -0,0 +1,10 @@ +{{- define "searchName" -}} +pega-search +{{- end -}} +{{- define "isExternalSearch" }} + {{- if and (.Values.externalURL) (ne .Values.externalURL "http://pega-search") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-search-deployment.yaml b/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml similarity index 73% rename from charts/pega/templates/pega-search-deployment.yaml rename to charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml index 13e2e003f..06baba14d 100644 --- a/charts/pega/templates/pega-search-deployment.yaml +++ b/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml @@ -1,6 +1,7 @@ +{{ if (eq (include "performDeployment" .) "true") }} # Only attempt to generate this file IF the user has NOT supplied # an external search URL which means we need to create one internally. -{{ if .Values.search.externalURL }} {{ else }} +{{ if (eq (include "isExternalSearch" .) "true") }} {{ else }} kind: StatefulSet apiVersion: apps/v1beta1 metadata: @@ -15,34 +16,34 @@ spec: app: {{ template "searchName" . 
}} component: "Search" serviceName: {{ template "searchName" . }} - replicas: {{ .Values.search.replicas }} + replicas: {{ .Values.replicas }} template: metadata: labels: app: {{ template "searchName" . }} component: "Search" spec: - {{ if ne .Values.provider "openshift" }} + {{ if ne .Values.global.provider "openshift" }} securityContext: fsGroup: 1000 {{ end }} initContainers: - {{ if eq .Values.provider "openshift" }} + {{ if eq .Values.global.provider "openshift" }} - name: set-dir-owner - image: busybox:1.27.2 + image: busybox:1.31.0 command: ['sh', '-c', 'chown -R 1000:1000 /usr/share/elasticsearch/data'] volumeMounts: - name: esstorage mountPath: /usr/share/elasticsearch/data {{ end }} - name: set-max-map-count - image: busybox:1.27.2 + image: busybox:1.31.0 command: ['sysctl', '-w', 'vm.max_map_count=262144'] securityContext: privileged: true containers: - name: search - image: {{ .Values.search.image }} + image: {{ .Values.image }} securityContext: runAsUser: 1000 env: @@ -50,13 +51,17 @@ spec: value: {{ template "searchName" . }}-transport - name: ES_JAVA_OPTS value: "-Xmx2g -Xms2g" + - name: UNICAST_HOSTS + value: {{ template "searchName" . 
}}-transport + - name: NUMBER_OF_MASTERS + value: "{{ .Values.env.MINIMUM_MASTER_NODES }}" resources: requests: cpu: "0.25" memory: "2Gi" limits: - cpu: "{{ .Values.search.cpuLimit }}" - memory: "{{ .Values.search.memLimit }}" + cpu: "{{ .Values.cpuLimit }}" + memory: "{{ .Values.memLimit }}" ports: - containerPort: 9200 name: http @@ -65,9 +70,11 @@ spec: livenessProbe: tcpSocket: port: transport + initialDelaySeconds: 5 + periodSeconds: 10 readinessProbe: httpGet: - path: /_cluster/health + path: /_cat port: http initialDelaySeconds: 20 timeoutSeconds: 5 @@ -83,6 +90,6 @@ spec: accessModes: [ ReadWriteOnce ] resources: requests: - storage: {{ .Values.search.volumeSize }} - + storage: {{ .Values.volumeSize }} {{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-search-service.yaml b/charts/pega/charts/pegasearch/templates/pega-search-service.yaml similarity index 78% rename from charts/pega/templates/pega-search-service.yaml rename to charts/pega/charts/pegasearch/templates/pega-search-service.yaml index d340bc750..b60d87540 100644 --- a/charts/pega/templates/pega-search-service.yaml +++ b/charts/pega/charts/pegasearch/templates/pega-search-service.yaml @@ -1,6 +1,7 @@ +{{ if (eq (include "performDeployment" .) "true") }} # Only attempt to generate this file IF the user has NOT supplied # an external search URL which means we need to create one internally. -{{ if .Values.search.externalURL }} {{ else }} +{{ if (eq (include "isExternalSearch" .) 
"true") }} {{ else }} kind: Service apiVersion: v1 metadata: @@ -17,5 +18,5 @@ spec: - name: http port: 80 targetPort: 9200 - {{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-search-transport-service.yaml b/charts/pega/charts/pegasearch/templates/pega-search-transport-service.yaml similarity index 80% rename from charts/pega/templates/pega-search-transport-service.yaml rename to charts/pega/charts/pegasearch/templates/pega-search-transport-service.yaml index f73168acf..48ca43197 100644 --- a/charts/pega/templates/pega-search-transport-service.yaml +++ b/charts/pega/charts/pegasearch/templates/pega-search-transport-service.yaml @@ -1,6 +1,7 @@ +{{ if (eq (include "performDeployment" .) "true") }} # Only attempt to generate this file IF the user has NOT supplied # an external search URL which means we need to create one internally. -{{ if .Values.search.externalURL }} {{ else }} +{{ if (eq (include "isExternalSearch" .) "true") }} {{ else }} kind: Service apiVersion: v1 metadata: @@ -18,5 +19,5 @@ spec: selector: app: {{ template "searchName" . }} component: "Search" - {{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/pegasearch/values.yaml b/charts/pega/charts/pegasearch/values.yaml new file mode 100644 index 000000000..469f01f56 --- /dev/null +++ b/charts/pega/charts/pegasearch/values.yaml @@ -0,0 +1,20 @@ +--- +# Enter the number of search nodes for Kubernetes to deploy (minimum 1). +replicas: 1 +# If externalURL is set, no search nodes will be deployed automatically, and Pega will use this search node url. +externalURL: "http://pega-search" +# Enter the docker image used to deploy Elasticsearch. This value will be ignored if using an external url. +# Push the Elasticsearch image to your internal docker registry. This must be the same registry as the docker section above. +image: "pegasystems/search" +# Enter the CPU limit for each search node (recommended 1). 
+cpuLimit: 1 +# Enter the Memory limit for each search node (recommended 4Gi). +memLimit: "4Gi" +# Enter the volume size limit for each search node (recommended 5Gi). +volumeSize: "5Gi" +env: + # IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes + # To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible + # node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster. + # This value should be configured using formula (n/2) + 1 where n is replica count or desired capacity + MINIMUM_MASTER_NODES: "1" diff --git a/charts/pega/config/deploy/context.xml.tmpl b/charts/pega/config/deploy/context.xml.tmpl new file mode 100644 index 000000000..b1f2743b1 --- /dev/null +++ b/charts/pega/config/deploy/context.xml.tmpl @@ -0,0 +1,31 @@ + + + + WEB-INF/web.xml + + + + + + + + {{ if .Env.CUSTOMERDATA_SCHEMA }} + + {{ else }} + + {{ end }} + + diff --git a/charts/pega/config/prconfig.xml b/charts/pega/config/deploy/prconfig.xml similarity index 100% rename from charts/pega/config/prconfig.xml rename to charts/pega/config/deploy/prconfig.xml diff --git a/charts/pega/config/prlog4j2.xml b/charts/pega/config/deploy/prlog4j2.xml similarity index 88% rename from charts/pega/config/prlog4j2.xml rename to charts/pega/config/deploy/prlog4j2.xml index 878bb179d..b593f43e2 100644 --- a/charts/pega/config/prlog4j2.xml +++ b/charts/pega/config/deploy/prlog4j2.xml @@ -74,6 +74,17 @@ + + + + + %d (%30.30c{3}) %-5p - %m%n + + + + + + @@ -101,5 +112,8 @@ + + + diff --git a/charts/pega/requirements.yaml b/charts/pega/requirements.yaml index 6af0f1807..f1eb225d7 100644 --- a/charts/pega/requirements.yaml +++ b/charts/pega/requirements.yaml @@ -1,27 +1,5 @@ dependencies: -- name: traefik - version: "~1.52.2" - # repository: https://meshbincam.pega.com/artifactory/helm-traefik - repository: 
https://kubernetes-charts.storage.googleapis.com - condition: traefik.enabled - name: cassandra - version: "0.9.4" - # repository: https://meshbincam.pega.com/artifactory/helm-cassandra + version: "0.13.3" repository: https://kubernetes-charts-incubator.storage.googleapis.com/ - condition: cassandra.enabled -- name: elasticsearch - version: "1.15.1" - repository: https://kubernetes-charts.storage.googleapis.com/ - condition: elasticsearch.enabled -- name: fluentd-elasticsearch - version: "1.5.0" - repository: https://kubernetes-charts.storage.googleapis.com/ - condition: fluentd-elasticsearch.enabled -- name: kibana - version: "1.1.0" - repository: https://kubernetes-charts.storage.googleapis.com/ - condition: kibana.enabled -- name: metrics-server - version: "2.5.0" - repository: https://kubernetes-charts.storage.googleapis.com/ - condition: metrics-server.enabled + condition: cassandra.enabled \ No newline at end of file diff --git a/charts/pega/templates/_helpers.tpl b/charts/pega/templates/_helpers.tpl index 6977117f0..041985317 100644 --- a/charts/pega/templates/_helpers.tpl +++ b/charts/pega/templates/_helpers.tpl @@ -3,13 +3,42 @@ {{- define "pegaVolumeCredentials" }}pega-volume-credentials{{- end }} {{- define "pegaCredentialsSecret" }}pega-credentials-secret{{- end }} {{- define "pegaRegistrySecret" }}pega-registry-secret{{- end }} -{{- define "pegaWebName" -}}pega-web{{- end -}} -{{- define "pegaBatchName" -}}pega-batch{{- end -}} -{{- define "pegaStreamName" -}}pega-stream{{- end -}} -{{- define "searchName" -}}pega-search{{- end -}} +{{- define "deployConfig" -}}deploy-config{{- end -}} {{- define "imagePullSecret" }} -{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.docker.registry.url (printf "%s:%s" .Values.docker.registry.username .Values.docker.registry.password | b64enc) | b64enc }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.global.docker.registry.url (printf "%s:%s" .Values.global.docker.registry.username 
.Values.global.docker.registry.password | b64enc) | b64enc }} +{{- end }} + +{{- define "performOnlyDeployment" }} + {{- if (eq .Values.global.actions.execute "deploy") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "performDeployment" }} + {{- if or (eq .Values.global.actions.execute "deploy") (eq .Values.global.actions.execute "install-deploy") (eq .Values.global.actions.execute "upgrade-deploy") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "performInstallAndDeployment" }} + {{- if (eq .Values.global.actions.execute "install-deploy") -}} + true + {{- else -}} + false + {{- end -}} +{{- end }} + +{{- define "performUpgradeAndDeployment" }} + {{- if (eq .Values.global.actions.execute "upgrade-deploy") -}} + true + {{- else -}} + false + {{- end -}} {{- end }} # list of either external or internal cassandra nodes @@ -47,19 +76,11 @@ {{- end -}} {{- end }} -{{- define "properPegaSearchURL" }} - {{- if .Values.search.externalURL -}} - {{ .Values.search.externalURL }} - {{- else -}} - http://{{ template "searchName" . }} - {{- end -}} -{{- end }} - {{- define "waitForPegaSearch" -}} - name: wait-for-pegasearch - image: busybox:1.27.2 + image: busybox:1.31.0 # Init container for waiting for Elastic Search to initialize. The URL should point at your Elastic Search instance. - command: ['sh', '-c', 'until $(wget -q -S --spider --timeout=2 -O /dev/null {{ include "properPegaSearchURL" . 
}}); do echo Waiting for search to become live...; sleep 10; done;'] + command: ['sh', '-c', 'until $(wget -q -S --spider --timeout=2 -O /dev/null {{ .Values.pegasearch.externalURL }}); do echo Waiting for search to become live...; sleep 10; done;'] {{- end }} {{- define "waitForCassandra" -}} @@ -95,66 +116,19 @@ until cqlsh -u {{ $cassandraUser | quote }} -p {{ $cassandraPassword | quote }} value: "{{ .node.javaOpts }}" # Initial JVM heap size, equivalent to -Xms - name: INITIAL_HEAP +{{- if .node.initialHeap }} value: "{{ .node.initialHeap }}" +{{- else }} + value: "4096m" +{{- end }} # Maximum JVM heap size, equivalent to -Xmx - name: MAX_HEAP +{{- if .node.maxHeap }} value: "{{ .node.maxHeap }}" -{{- end -}} - -{{- define "commonEnvironmentVariables" -}} -- name: CASSANDRA_CLUSTER - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: CASSANDRA_CLUSTER -- name: CASSANDRA_NODES - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: CASSANDRA_NODES -- name: CASSANDRA_PORT - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: CASSANDRA_PORT -- name: PEGA_SEARCH_URL - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: PEGA_SEARCH_URL -- name: JDBC_URL - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: JDBC_URL -- name: JDBC_CLASS - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: JDBC_CLASS -- name: JDBC_DRIVER_URI - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: JDBC_DRIVER_URI -- name: RULES_SCHEMA - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: RULES_SCHEMA -- name: DATA_SCHEMA - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: DATA_SCHEMA -- name: CUSTOMERDATA_SCHEMA - valueFrom: - configMapKeyRef: - name: {{ template "pegaEnvironmentConfig" }} - key: 
CUSTOMERDATA_SCHEMA -- name: DL-NAME - value: EMPTY +{{- else }} + value: "7168m" {{- end }} +{{- end -}} {{- define "pega.health.probes" -}} # LivenessProbe: indicates whether the container is live, i.e. running. @@ -204,3 +178,12 @@ readinessProbe: # Pod will be marked Unready. Defaults to 3. Minimum value is 1. failureThreshold: 3 {{- end }} + +# Evaluate background node types based on cassandra enabled or not(internally or externally) +{{- define "evaluateBackgroundNodeTypes" }} + {{- if eq (include "cassandraEnabled" .) "true" -}} + BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX,ADM,RTDG + {{- else -}} + Background + {{- end -}} +{{- end }} diff --git a/charts/pega/templates/_pega-config.tpl b/charts/pega/templates/_pega-config.tpl index 5a06c65ac..ddd3fc8d1 100644 --- a/charts/pega/templates/_pega-config.tpl +++ b/charts/pega/templates/_pega-config.tpl @@ -1,4 +1,5 @@ {{- define "pega.config" -}} +{{- $arg := .mode -}} # Node type specific configuration for {{ .name }} kind: ConfigMap apiVersion: v1 @@ -6,10 +7,51 @@ metadata: name: {{ .name }} namespace: {{ .root.Release.Namespace }} data: - # prconfig file to be used by {{ .name }} + +# Start of Pega Deployment Configuration + +{{ if eq $arg "deploy-config" }} + +{{- $prconfigPath := "config/deploy/prconfig.xml" }} +{{- $contextXMLTemplate := "config/deploy/context.xml.tmpl" }} +{{- $prlog4j2Path := "config/deploy/prlog4j2.xml" }} + +{{- if .custom }} +{{- if .custom.prconfig }} + # CUSTOM prconfig file to be used by {{ .name }} prconfig.xml: |- -{{ .root.Files.Get .node.prconfigPath | indent 6 }} +{{ .custom.prconfig | indent 6 }} +{{ else if $prconfig := .root.Files.Glob $prconfigPath }} + # prconfig file to be used by {{ .name }} + prconfig.xml: |- +{{ .root.Files.Get $prconfigPath | indent 6 }} +{{- end }} +{{ else if $prconfig := .root.Files.Glob $prconfigPath }} + # prconfig file to be used by {{ .name }} + prconfig.xml: |- +{{ .root.Files.Get $prconfigPath | 
indent 6 }} +{{- end }} + +{{ if $contextXML := .root.Files.Glob $contextXMLTemplate }} + # contextXMLTemplate to be used by {{ .name }} + context.xml.tmpl: |- +{{ .root.Files.Get $contextXMLTemplate | indent 6 }} +{{- end }} + +{{- if .custom }} +{{- if .custom.context }} + # CUSTOM context file to be used by {{ .name }} + context.xml: |- +{{ .custom.context | indent 6 }} +{{- end }} +{{- end }} + # prlog4j2 file to be used by {{ .name }} prlog4j2.xml: |- -{{ .root.Files.Get .node.prlog4j2Path | indent 6 }} +{{ .root.Files.Get $prlog4j2Path | indent 6 }} + +{{- end }} +# End of Pega Deployment Configuration +--- {{- end }} + diff --git a/charts/pega/templates/_pega-deployment.tpl b/charts/pega/templates/_pega-deployment.tpl index d226234da..d058b6a70 100644 --- a/charts/pega/templates/_pega-deployment.tpl +++ b/charts/pega/templates/_pega-deployment.tpl @@ -5,18 +5,27 @@ metadata: name: {{ .name }} namespace: {{ .root.Release.Namespace }} labels: - app: {{ template "pegaWebName" .root }} {{/* This is intentionally always the web name because that's what we call our "app" */}} + app: {{ .name }} {{/* This is intentionally always the web name because that's what we call our "app" */}} component: Pega spec: # Replicas specify the number of copies for {{ .name }} replicas: {{ .node.replicas }} +{{- if (eq .kind "Deployment") }} + progressDeadlineSeconds: 2147483647 +{{- end }} selector: matchLabels: app: {{ .name }} +{{- if .node.deploymentStrategy }} + strategy: +{{ toYaml .node.deploymentStrategy | indent 4 }} +{{- end }} template: metadata: labels: app: {{ .name }} + annotations: + config-check: {{ include (print .root.Template.BasePath "/pega-environment-config.yaml") .root | sha256sum }} spec: volumes: # Volume used to mount config files. @@ -32,41 +41,91 @@ spec: secretName: {{ template "pegaCredentialsSecret" }} # Used to specify permissions on files within the volume. 
defaultMode: 420 +{{- if .custom }} +{{- if .custom.volumes }} + # Additional custom volumes +{{ toYaml .custom.volumes | indent 6 }} +{{- end }} +{{- end }} initContainers: {{- range $i, $val := .initContainers }} {{ include $val $.root | indent 6 }} +{{- end }} +{{- if .custom }} +{{- if .custom.initContainers }} + # Additional custom init containers +{{ toYaml .custom.initContainers | indent 6 }} +{{- end }} {{- end }} containers: # Name of the container - name: pega-web-tomcat # The pega image, you may use the official pega distribution or you may extend # and host it yourself. See the image documentation for more information. - image: {{ .root.Values.docker.image }} + image: {{ .root.Values.global.docker.pega.image }} # Pod (app instance) listens on this port ports: - containerPort: 8080 + name: pega-web-port +{{- if .custom }} +{{- if .custom.ports }} + # Additional custom ports +{{ toYaml .custom.ports | indent 8 }} +{{- end }} +{{- end }} # Specify any of the container environment variables here env: # Node type of the Pega nodes for {{ .name }} - name: NODE_TYPE value: {{ .nodeType }} +{{- if .custom }} +{{- if .custom.env }} + # Additional custom env vars +{{ toYaml .custom.env | indent 8 }} +{{- end }} +{{- end }} {{ include "pega.jvmconfig" (dict "node" .node) | indent 8 }} -{{ include "commonEnvironmentVariables" .root | indent 8 }} + envFrom: + - configMapRef: + name: {{ template "pegaEnvironmentConfig" }} resources: # Maximum CPU and Memory that the containers for {{ .name }} can use limits: + {{- if .node.cpuLimit }} cpu: "{{ .node.cpuLimit }}" + {{- else }} + cpu: 2 + {{- end }} + {{- if .node.memLimit }} memory: "{{ .node.memLimit }}" + {{- else }} + memory: "8Gi" + {{- end }} # CPU and Memory that the containers for {{ .name }} request requests: - cpu: "200m" - memory: "2Gi" + {{- if .node.cpuRequest }} + cpu: "{{ .node.cpuRequest }}" + {{- else }} + cpu: 200m + {{- end }} + {{- if .node.memRequest }} + memory: "{{ .node.memRequest }}" + {{- else 
}} + memory: "6Gi" + {{- end }} volumeMounts: # The given mountpath is mapped to volume with the specified name. The config map files are mounted here. - name: {{ template "pegaVolumeConfig" }} mountPath: "/opt/pega/config" -{{- if .extraVolume }} -{{ include .extraVolume .root | indent 8 }} +{{- if (.node.volumeClaimTemplate) }} + - name: {{ .name }} + mountPath: "/opt/pega/streamvol" +{{- end }} +{{- if .custom }} +{{- if .custom.volumeMounts }} + # Additional custom mounts +{{ toYaml .custom.volumeMounts | indent 8 }} +{{- end }} {{- end }} - name: {{ template "pegaVolumeCredentials" }} mountPath: "/opt/pega/secrets" @@ -79,7 +138,18 @@ spec: # If the image is in a protected registry, you must specify a secret to access it. imagePullSecrets: - name: {{ template "pegaRegistrySecret" }} -{{- if .extraSpecData }} -{{ include .extraSpecData .root | indent 2 }} +{{- if (.node.volumeClaimTemplate) }} + volumeClaimTemplates: + - metadata: + name: {{ .name }} + creationTimestamp: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .node.volumeClaimTemplate.resources.requests.storage }} + serviceName: {{ .name }} {{- end }} +--- {{- end -}} diff --git a/charts/pega/templates/_pega-eks-ingress.tpl b/charts/pega/templates/_pega-eks-ingress.tpl new file mode 100644 index 000000000..62afa21df --- /dev/null +++ b/charts/pega/templates/_pega-eks-ingress.tpl @@ -0,0 +1,37 @@ +{{- define "pega.eks.ingress" -}} +# Ingress to be used for {{ .name }} +kind: Ingress +apiVersion: extensions/v1beta1 +metadata: + name: {{ .name }} + namespace: {{ .root.Release.Namespace }} + annotations: + # Ingress class used is 'alb' + kubernetes.io/ingress.class: alb + # specifies the ports that ALB used to listen on + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]' + # set the redirect action to redirect http traffic into https + alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": 
"443", "StatusCode": "HTTP_301"}}' + # override the default scheme internal as ALB should be internet-facing + alb.ingress.kubernetes.io/scheme: internet-facing + # enable sticky sessions on target group + alb.ingress.kubernetes.io/target-group-attributes: stickiness.enabled=true,stickiness.lb_cookie.duration_seconds={{ .node.service.alb_stickiness_lb_cookie_duration_seconds }} + # set to ip mode to route traffic directly to the pods ip + alb.ingress.kubernetes.io/target-type: ip +spec: + rules: + - http: + paths: + - backend: + serviceName: ssl-redirect + servicePort: use-annotation + # The calls will be redirected from {{ .node.domain }} to below mentioned backend serviceName and servicePort. + # To access the below service, along with {{ .node.domain }}, alb http port also has to be provided in the URL. + - host: {{ .node.service.domain }} + http: + paths: + - backend: + serviceName: {{ .name }} + servicePort: {{ .node.service.port }} +--- +{{- end }} diff --git a/charts/pega/templates/_pega-k8s-ingress.tpl b/charts/pega/templates/_pega-k8s-ingress.tpl new file mode 100644 index 000000000..5d4bf5169 --- /dev/null +++ b/charts/pega/templates/_pega-k8s-ingress.tpl @@ -0,0 +1,22 @@ +{{- define "pega.k8s.ingress" -}} +# Ingress to be used for {{ .name }} +kind: Ingress +apiVersion: extensions/v1beta1 +metadata: + name: {{ .name }} + namespace: {{ .root.Release.Namespace }} + annotations: + # Ingress class used is 'traefik' + kubernetes.io/ingress.class: traefik +spec: + rules: + # The calls will be redirected from {{ .node.domain }} to below mentioned backend serviceName and servicePort. + # To access the below service, along with {{ .node.domain }}, traefik http port also has to be provided in the URL. 
+ - host: {{ .node.service.domain }} + http: + paths: + - backend: + serviceName: {{ .name }} + servicePort: {{ .node.service.port }} +--- +{{- end }} diff --git a/charts/pega/templates/_pega-ingress.tpl b/charts/pega/templates/_pega-openshift-ingress.tpl similarity index 60% rename from charts/pega/templates/_pega-ingress.tpl rename to charts/pega/templates/_pega-openshift-ingress.tpl index 43ac9a4fc..96a6d8cae 100644 --- a/charts/pega/templates/_pega-ingress.tpl +++ b/charts/pega/templates/_pega-openshift-ingress.tpl @@ -1,5 +1,4 @@ -{{- define "pega.ingress" -}} -{{- if eq .root.Values.provider "openshift" -}} +{{- define "pega.openshift.ingress" -}} # Route to be used for {{ .name }} kind: Route apiVersion: route.openshift.io/v1 @@ -22,28 +21,5 @@ spec: # Edge-terminated routes can specify an insecureEdgeTerminationPolicy that enables traffic on insecure schemes (HTTP) to be disabled, allowed or redirected. (None/Allow/Redirect/EMPTY_VALUE) insecureEdgeTerminationPolicy: Redirect termination: edge - -{{- else -}} -# Ingress to be used for {{ .name }} -kind: Ingress -apiVersion: extensions/v1beta1 -metadata: - name: {{ .name }} - namespace: {{ .root.Release.Namespace }} - annotations: - # Ingress class used is 'traefik' - kubernetes.io/ingress.class: traefik -spec: - rules: - # The calls will be redirected from {{ .node.domain }} to below mentioned backend serviceName and servicePort. - # To access the below service, along with {{ .node.domain }}, traefik http port also has to be provided in the URL. 
- - host: {{ .node.domain }} - http: - paths: - - backend: - serviceName: {{ .name }} - servicePort: {{ .port }} - -{{- end -}} - -{{- end }} +--- +{{- end }} \ No newline at end of file diff --git a/charts/pega/templates/_pega-service.tpl b/charts/pega/templates/_pega-service.tpl index 1c1aae2a9..3c5d88237 100644 --- a/charts/pega/templates/_pega-service.tpl +++ b/charts/pega/templates/_pega-service.tpl @@ -6,7 +6,8 @@ metadata: # Name of the service for name: {{ .name }} namespace: {{ .root.Release.Namespace }} - annotations: + {{- if and (ne .root.Values.global.provider "eks") (ne .root.Values.global.provider "openshift") }} + annotations: # Enable backend sticky sessions traefik.ingress.kubernetes.io/affinity: 'true' # Override the default wrr load balancer algorithm. @@ -16,7 +17,9 @@ metadata: traefik.ingress.kubernetes.io/max-conn-amount: '10' # Manually set the cookie name for sticky sessions traefik.ingress.kubernetes.io/session-cookie-name: UNIQUE-PEGA-COOKIE-NAME + {{ end }} spec: + type: NodePort # Specification of on which port the service is enabled ports: - name: http @@ -24,4 +27,5 @@ spec: targetPort: {{ .targetPort }} selector: app: {{ .name }} +--- {{- end -}} \ No newline at end of file diff --git a/charts/pega/templates/_pega_hpa.tpl b/charts/pega/templates/_pega_hpa.tpl index a991fac73..f376ab635 100644 --- a/charts/pega/templates/_pega_hpa.tpl +++ b/charts/pega/templates/_pega_hpa.tpl @@ -11,16 +11,33 @@ spec: apiVersion: extensions/v1beta1 kind: Deployment name: {{ .deploymentName | quote }} + {{- if .hpa.minReplicas }} minReplicas: {{ .hpa.minReplicas }} + {{- else }} + minReplicas: 1 + {{- end }} + {{- if .hpa.maxReplicas }} maxReplicas: {{ .hpa.maxReplicas }} + {{- else }} + maxReplicas: 5 + {{- end }} metrics: - type: Resource resource: name: cpu + {{- if .hpa.targetAverageCPUUtilization }} targetAverageUtilization: {{ .hpa.targetAverageCPUUtilization }} + {{- else }} + targetAverageUtilization: 700 + {{- end }} - type: Resource 
resource: name: memory + {{- if .hpa.targetAverageMemoryUtilization }} targetAverageUtilization: {{ .hpa.targetAverageMemoryUtilization }} + {{- else }} + targetAverageUtilization: 85 + {{- end }} +--- {{- end -}} {{- end -}} \ No newline at end of file diff --git a/charts/pega/templates/pega-action-validate.yaml b/charts/pega/templates/pega-action-validate.yaml new file mode 100644 index 000000000..4b20b4cce --- /dev/null +++ b/charts/pega/templates/pega-action-validate.yaml @@ -0,0 +1,4 @@ +{{- $validActions := list "install" "deploy" "install-deploy" "upgrade" "upgrade-deploy" }} +{{- if not (has .Values.global.actions.execute $validActions) }} +{{- fail "Action value is not correct. The valid values are 'install' 'deploy' 'install-deploy' 'upgrade' 'upgrade-deploy'" }} +{{- end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-batch-config.yaml b/charts/pega/templates/pega-batch-config.yaml deleted file mode 100644 index dfd2650f9..000000000 --- a/charts/pega/templates/pega-batch-config.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.config" dict "root" $ "node" .Values.batch "name" (include "pegaBatchName" .) }} diff --git a/charts/pega/templates/pega-batch-deployment.yaml b/charts/pega/templates/pega-batch-deployment.yaml deleted file mode 100644 index 053bfb1bb..000000000 --- a/charts/pega/templates/pega-batch-deployment.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.deployment" dict "root" $ "node" .Values.batch "name" (include "pegaBatchName" .) 
"kind" "Deployment" "apiVersion" "extensions/v1beta1" "nodeType" "Background" "initContainers" (list "waitForPegaSearch" "waitForCassandra") }} \ No newline at end of file diff --git a/charts/pega/templates/pega-batch-hpa.yaml b/charts/pega/templates/pega-batch-hpa.yaml deleted file mode 100644 index 4da9426da..000000000 --- a/charts/pega/templates/pega-batch-hpa.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.hpa" dict "root" $ "name" (printf "%s-hpa" (include "pegaBatchName" .)) "deploymentName" (include "pegaBatchName" .) "hpa" .Values.batch.hpa}} \ No newline at end of file diff --git a/charts/pega/templates/pega-credentials-secret.yaml b/charts/pega/templates/pega-credentials-secret.yaml index 80a22c27f..de57fcaf5 100644 --- a/charts/pega/templates/pega-credentials-secret.yaml +++ b/charts/pega/templates/pega-credentials-secret.yaml @@ -5,15 +5,22 @@ metadata: namespace: {{ .Release.Namespace }} data: # Base64 encdoded username for connecting to the Pega DB - DB_USERNAME: {{ .Values.jdbc.username | b64enc }} + DB_USERNAME: {{ .Values.global.jdbc.username | b64enc }} # Base64 encdoded password for connecting to the Pega DB - DB_PASSWORD: {{ .Values.jdbc.password | b64enc }} + DB_PASSWORD: {{ .Values.global.jdbc.password | b64enc }} + + {{ if (eq (include "performDeployment" .) 
"true") }} # Base64 encdoded username for connecting to cassandra CASSANDRA_USERNAME: {{ .Values.dds.username | b64enc }} # Base64 encdoded password for connecting to cassandra CASSANDRA_PASSWORD: {{ .Values.dds.password | b64enc }} + {{ range $index, $dep := .Values.global.tier}} + {{ if and ($dep.pegaDiagnosticUser) (eq $dep.name "web") }} # Base64 encdoded username for a Tomcat user that will be created with the PegaDiagnosticUser role - PEGA_DIAGNOSTIC_USER: {{ .Values.web.pegaDiagnosticUser | b64enc }} + PEGA_DIAGNOSTIC_USER: {{ $dep.pegaDiagnosticUser | b64enc }} # Base64 encdoded password for a Tomcat user that will be created with the PegaDiagnosticUser role - PEGA_DIAGNOSTIC_PASSWORD: {{ .Values.web.pegaDiagnosticPassword | b64enc }} + PEGA_DIAGNOSTIC_PASSWORD: {{ $dep.pegaDiagnosticPassword | b64enc }} + {{ end }} + {{ end }} +{{ end }} type: Opaque diff --git a/charts/pega/templates/pega-environment-config.yaml b/charts/pega/templates/pega-environment-config.yaml index c70631f66..d3e4d1ccb 100644 --- a/charts/pega/templates/pega-environment-config.yaml +++ b/charts/pega/templates/pega-environment-config.yaml @@ -1,27 +1,37 @@ # Config map used for common configuration between Pega nodes +{{ if (eq (include "performDeployment" .) 
"true") }} kind: ConfigMap apiVersion: v1 metadata: name: {{ template "pegaEnvironmentConfig" }} namespace: {{ .Release.Namespace }} data: + # Database Type for installation + DB_TYPE: {{ .Values.global.jdbc.dbType }} # JDBC URL of the DB where Pega is installed - JDBC_URL: {{ .Values.jdbc.url }} + JDBC_URL: {{ .Values.global.jdbc.url }} # Class name of the DB's JDBC driver - JDBC_CLASS: {{ .Values.jdbc.driverClass }} + JDBC_CLASS: {{ .Values.global.jdbc.driverClass }} # URI that the JDBC driver can be downloaded from - JDBC_DRIVER_URI: {{ .Values.jdbc.driverUri }} + JDBC_DRIVER_URI: {{ .Values.global.jdbc.driverUri }} + # The connection properties that will be sent to our JDBC driver when establishing new connections + JDBC_CONNECTION_PROPERTIES: {{ .Values.global.jdbc.connectionProperties }} # Rules schema of the Pega installation - RULES_SCHEMA: {{ .Values.jdbc.rulesSchema }} +{{ if (eq (include "performUpgradeAndDeployment" .) "true") }} + RULES_SCHEMA: {{ .Values.installer.upgrade.targetRulesSchema }} +{{ else }} + RULES_SCHEMA: {{ .Values.global.jdbc.rulesSchema }} +{{ end }} # Data schema of the Pega installation - DATA_SCHEMA: {{ .Values.jdbc.dataSchema }} + DATA_SCHEMA: {{ .Values.global.jdbc.dataSchema }} # CustomerData schema of the Pega installation - CUSTOMERDATA_SCHEMA: {{ .Values.jdbc.customerDataSchema }} + CUSTOMERDATA_SCHEMA: {{ .Values.global.jdbc.customerDataSchema }} # URL to connect to Elastic Search - PEGA_SEARCH_URL: {{ include "properPegaSearchURL" . }} + PEGA_SEARCH_URL: {{ .Values.pegasearch.externalURL }} # Whether to enable connecting to a cassandra cluster. "true" for enabled, "false for disabled" CASSANDRA_CLUSTER: "{{ include "cassandraEnabled" . }}" # Comma separated list of cassandra hosts CASSANDRA_NODES: "{{ include "cassandraNodes" . 
}}" # Port to connect to cassandra with - CASSANDRA_PORT: "{{ .Values.dds.port }}" + CASSANDRA_PORT: "{{ .Values.dds.port }}" +{{ end }} diff --git a/charts/pega/templates/pega-stream-config.yaml b/charts/pega/templates/pega-stream-config.yaml deleted file mode 100644 index 5c34077cd..000000000 --- a/charts/pega/templates/pega-stream-config.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.config" dict "root" $ "node" .Values.stream "name" (include "pegaStreamName" .) }} \ No newline at end of file diff --git a/charts/pega/templates/pega-stream-deployment.yaml b/charts/pega/templates/pega-stream-deployment.yaml deleted file mode 100644 index 69be019ee..000000000 --- a/charts/pega/templates/pega-stream-deployment.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- define "pega.stream.volume" -}} -- name: pega-stream - mountPath: "/opt/pega" -{{- end -}} -{{- define "pega.stream.extraData" -}} -volumeClaimTemplates: -- metadata: - name: {{ template "pegaStreamName" . }} - creationTimestamp: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi -serviceName: {{ template "pegaStreamName" . }} -{{- end -}} -{{ template "pega.deployment" dict "root" $ "node" .Values.stream "name" (include "pegaStreamName" .) "kind" "StatefulSet" "apiVersion" "apps/v1beta2" "nodeType" "Stream" "initContainers" (list "waitForPegaSearch") "extraVolume" "pega.stream.volume" "extraSpecData" "pega.stream.extraData" }} diff --git a/charts/pega/templates/pega-stream-ingress.yaml b/charts/pega/templates/pega-stream-ingress.yaml deleted file mode 100644 index 8c3579b5c..000000000 --- a/charts/pega/templates/pega-stream-ingress.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.ingress" dict "root" $ "node" .Values.stream "port" 7003 "name" (include "pegaStreamName" .) 
}} \ No newline at end of file diff --git a/charts/pega/templates/pega-stream-service.yaml b/charts/pega/templates/pega-stream-service.yaml deleted file mode 100644 index eedfc8d2c..000000000 --- a/charts/pega/templates/pega-stream-service.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.service" dict "root" $ "name" (include "pegaStreamName" .) "port" 7003 "targetPort" 7003 }} \ No newline at end of file diff --git a/charts/pega/templates/pega-tier-config.yaml b/charts/pega/templates/pega-tier-config.yaml new file mode 100644 index 000000000..4cf0e033b --- /dev/null +++ b/charts/pega/templates/pega-tier-config.yaml @@ -0,0 +1,5 @@ +{{ if (eq (include "performDeployment" $) "true") }} +{{ range $index, $dep := .Values.global.tier}} +{{ template "pega.config" dict "root" $ "node" $dep "name" (printf "pega-%s" $dep.name) "mode" (include "deployConfig" $) "custom" $dep.custom }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-tier-deployment.yaml b/charts/pega/templates/pega-tier-deployment.yaml new file mode 100644 index 000000000..ef1b3dc61 --- /dev/null +++ b/charts/pega/templates/pega-tier-deployment.yaml @@ -0,0 +1,35 @@ +{{ $kindName := "" }} +{{ $apiVer := "" }} +{{ range $index, $dep := .Values.global.tier}} +{{ if ($dep.volumeClaimTemplate) }} +{{ $kindName = "StatefulSet" }} +{{ $apiVer = "apps/v1beta2" }} +{{ else }} +{{ $kindName = "Deployment" }} +{{ $apiVer = "extensions/v1beta1" }} +{{ end }} + +{{ $containerWaitList := list }} +{{ if (eq (include "performOnlyDeployment" $) "true") }} +{{ $containerWaitList = append $containerWaitList "waitForPegaSearch" }} +{{ if (eq (include "cassandraEnabled" $) "true" ) }} +{{ $containerWaitList = append $containerWaitList "waitForCassandra" }} +{{ end }} +{{ end }} + +{{ if (eq (include "performInstallAndDeployment" $) "true") }} +{{ $containerWaitList = append $containerWaitList "waitForPegaDBInstall" }} +{{ $containerWaitList = append $containerWaitList 
"waitForPegaSearch" }} +{{ if (eq (include "cassandraEnabled" $) "true" ) }} +{{ $containerWaitList = append $containerWaitList "waitForCassandra" }} +{{ end }} +{{ end }} + +{{ if (eq (include "performUpgradeAndDeployment" $) "true") }} +{{ $containerWaitList = append $containerWaitList "waitForPegaDBUpgrade" }} +{{ end }} + +{{- if or (eq (include "performOnlyDeployment" $) "true") (eq (include "performInstallAndDeployment" $) "true") (eq (include "performUpgradeAndDeployment" $) "true") }} +{{ template "pega.deployment" dict "root" $ "node" $dep "name" (printf "pega-%s" $dep.name) "kind" $kindName "apiVersion" $apiVer "nodeType" $dep.nodeType "initContainers" $containerWaitList "custom" $dep.custom }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-tier-hpa.yaml b/charts/pega/templates/pega-tier-hpa.yaml new file mode 100644 index 000000000..343755407 --- /dev/null +++ b/charts/pega/templates/pega-tier-hpa.yaml @@ -0,0 +1,7 @@ +{{ if (eq (include "performDeployment" $) "true") }} +{{ range $dep := .Values.global.tier }} +{{ if ($dep.hpa) }} +{{ template "pega.hpa" dict "root" $ "name" (printf "%s-hpa" (printf "pega-%s" $dep.name)) "deploymentName" (printf "pega-%s" $dep.name) "hpa" $dep.hpa }} +{{ end }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-tier-ingress.yaml b/charts/pega/templates/pega-tier-ingress.yaml new file mode 100644 index 000000000..067a70c4d --- /dev/null +++ b/charts/pega/templates/pega-tier-ingress.yaml @@ -0,0 +1,12 @@ +{{- range $index, $dep := .Values.global.tier }} +{{ if and (eq (include "performDeployment" $ ) "true") ($dep.service) }} +{{ $alb := index $.Values "aws-alb-ingress-controller" }} +{{- if eq $.Values.global.provider "openshift" -}} +{{ template "pega.openshift.ingress" dict "root" $ "node" $dep "name" (printf "pega-%s" $dep.name) }} +{{- else if and (eq $.Values.global.provider "eks") -}} +{{ template "pega.eks.ingress" dict "root" $ 
"node" $dep "name" (printf "pega-%s" $dep.name) }} +{{- else -}} +{{ template "pega.k8s.ingress" dict "root" $ "node" $dep "name" (printf "pega-%s" $dep.name) }} +{{- end -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/pega/templates/pega-tier-service.yaml b/charts/pega/templates/pega-tier-service.yaml new file mode 100644 index 000000000..abfb50592 --- /dev/null +++ b/charts/pega/templates/pega-tier-service.yaml @@ -0,0 +1,7 @@ +{{ if (eq (include "performDeployment" $) "true") }} +{{ range $index, $dep := .Values.global.tier }} +{{ if ($dep.service) }} +{{ template "pega.service" dict "root" $ "name" (printf "pega-%s" $dep.name) "port" $dep.service.port "targetPort" $dep.service.targetPort }} +{{ end }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/templates/pega-web-config.yaml b/charts/pega/templates/pega-web-config.yaml deleted file mode 100644 index 66449b4e9..000000000 --- a/charts/pega/templates/pega-web-config.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.config" dict "root" $ "node" .Values.web "name" (include "pegaWebName" .) }} \ No newline at end of file diff --git a/charts/pega/templates/pega-web-deployment.yaml b/charts/pega/templates/pega-web-deployment.yaml deleted file mode 100644 index 1d2e33816..000000000 --- a/charts/pega/templates/pega-web-deployment.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.deployment" dict "root" $ "node" .Values.web "name" (include "pegaWebName" .) "kind" "Deployment" "apiVersion" "extensions/v1beta1" "nodeType" "Foreground" "initContainers" (list "waitForPegaSearch") }} diff --git a/charts/pega/templates/pega-web-hpa.yaml b/charts/pega/templates/pega-web-hpa.yaml deleted file mode 100644 index 1436319ff..000000000 --- a/charts/pega/templates/pega-web-hpa.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.hpa" dict "root" $ "name" (printf "%s-hpa" (include "pegaWebName" .)) "deploymentName" (include "pegaWebName" .) 
"hpa" .Values.web.hpa}} \ No newline at end of file diff --git a/charts/pega/templates/pega-web-ingress.yaml b/charts/pega/templates/pega-web-ingress.yaml deleted file mode 100644 index 528681e55..000000000 --- a/charts/pega/templates/pega-web-ingress.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.ingress" dict "root" $ "node" .Values.web "port" 80 "name" (include "pegaWebName" .) }} \ No newline at end of file diff --git a/charts/pega/templates/pega-web-service.yaml b/charts/pega/templates/pega-web-service.yaml deleted file mode 100644 index 02cd5e856..000000000 --- a/charts/pega/templates/pega-web-service.yaml +++ /dev/null @@ -1 +0,0 @@ -{{ template "pega.service" dict "root" $ "name" (include "pegaWebName" .) "port" 80 "targetPort" 8080 }} \ No newline at end of file diff --git a/charts/pega/values-large.yaml b/charts/pega/values-large.yaml new file mode 100644 index 000000000..3b4d975a4 --- /dev/null +++ b/charts/pega/values-large.yaml @@ -0,0 +1,221 @@ +--- +global: + # This values.yaml file is an example of a large Pega deployment. + # For more information about each configuration option, see the + # project readme. + + # Enter your Kubernetes provider. + provider: "YOUR_KUBERNETES_PROVIDER" + + # Deploy Pega nodes + actions: + execute: "deploy" + + # Provide JDBC connection information to the Pega relational database + # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties. 
+ jdbc: + # url Valid values are: + # + # Oracle jdbc:oracle:thin:@//localhost:1521/dbName + # IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName + # IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true; + # progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + # SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false + # PostgreSQL jdbc:postgresql://localhost:5432/dbName + url: "YOUR_JDBC_URL" + # driverClass -- jdbc class. Valid values are: + # + # Oracle oracle.jdbc.OracleDriver + # IBM DB/2 com.ibm.db2.jcc.DB2Driver + # SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver + # PostgreSQL org.postgresql.Driver + driverClass: "YOUR_JDBC_DRIVER_CLASS" + # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres + dbType: "YOUR_DATABASE_TYPE" + # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri' + driverUri: "YOUR_JDBC_DRIVER_URI" + username: "YOUR_JDBC_USERNAME" + password: "YOUR_JDBC_PASSWORD" + # CUSTOM CONNECTION PROPERTIES + # Add a list of ; delimited connections properties. The list must end with ; + # For example: connectionProperties=user=usr;password=pwd; + connectionProperties: "" + rulesSchema: "YOUR_RULES_SCHEMA" + dataSchema: "YOUR_DATA_SCHEMA" + customerDataSchema: "" + + # If using a custom Docker registry, supply the credentials here to pull Docker images. + docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" + # Docker image information for the Pega docker image, containing the application server. + pega: + image: "pegasystems/pega" + + # Upgrade specific properties + upgrade: + # Configure only for aks/pks + # Run "kubectl cluster-info" command to get the service host and https service port of kubernetes api server. 
+    # Example - Kubernetes master is running at https://:
+    kube-apiserver:
+      serviceHost: "API_SERVICE_ADDRESS"
+      httpsServicePort: "SERVICE_PORT_HTTPS"
+
+  # Specify the Pega tiers to deploy
+  tier:
+    - name: "web"
+      # Create an interactive tier for web users. This tier uses
+      # the WebUser node type and will be exposed via a service to
+      # the load balancer.
+      nodeType: "WebUser"
+
+      service:
+        # Enter the domain name to access web nodes via a load balancer.
+        # e.g. web.mypega.example.com
+        domain: "YOUR_WEB_NODE_DOMAIN"
+        port: 80
+        targetPort: 8080
+        # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors
+        alb_stickiness_lb_cookie_duration_seconds: 3660
+
+      replicas: 1
+      javaOpts: ""
+
+      pegaDiagnosticUser: ""
+      pegaDiagnosticPassword: ""
+
+      deploymentStrategy:
+        rollingUpdate:
+          maxSurge: 25%
+          maxUnavailable: 25%
+        type: RollingUpdate
+
+      hpa:
+        enabled: true
+
+    - name: "batch"
+      # Create a background tier for batch processing. This tier uses
+      # a collection of background node types and will not be exposed to
+      # the load balancer.
+      nodeType: "BackgroundProcessing,Search,Batch,Custom1,Custom2,Custom3,Custom4,Custom5"
+
+      replicas: 1
+      javaOpts: ""
+
+      pegaDiagnosticUser: ""
+      pegaDiagnosticPassword: ""
+
+      deploymentStrategy:
+        rollingUpdate:
+          maxSurge: 25%
+          maxUnavailable: 25%
+        type: RollingUpdate
+
+      hpa:
+        enabled: true
+
+    - name: "stream"
+      # Create a stream tier for queue processing. This tier deploys
+      # as a stateful set to ensure durability of queued data. It may
+      # be optionally exposed to the load balancer.
+      nodeType: "Stream"
+
+      service:
+        # Enter the domain name to access stream nodes via a load balancer.
+        # e.g. 
stream.mypega.example.com + domain: "YOUR_STREAM_NODE_DOMAIN" + port: 7003 + targetPort: 7003 + # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors + alb_stickiness_lb_cookie_duration_seconds: 3660 + + replicas: 2 + javaOpts: "" + + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + + - name: "bix" + # Create a background tier for BIX processing. This tier uses + # the BIX node type and will not be exposed to the load balancer. + nodeType: "BIX" + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + + - name: "realtime" + # Create a dedicated tier for real-time data grid processing. + nodeType: "RealTime" + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + +# External services + +# Cassandra automatic deployment settings. +cassandra: + enabled: true + persistence: + enabled: true + resources: + requests: + memory: "4Gi" + cpu: 2 + limits: + memory: "8Gi" + cpu: 4 + +# DDS (external Cassandra) connection settings. +# These settings should only be modified if you are using a custom Cassandra deployment. +dds: + externalNodes: "" + port: "9042" + username: "dnode_ext" + password: "dnode_ext" + +# Elasticsearch deployment settings. +# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack. +# These search nodes will be deployed regardless of the Elasticsearch configuration above. 
+pegasearch: + image: "pegasystems/search" + memLimit: "3Gi" + +# Pega Installer settings +installer: + image: "YOUR_INSTALLER_IMAGE:TAG" + adminPassword: "ADMIN_PASSWORD" + # Upgrade specific properties + upgrade: + # Type of upgrade + # Valid values are 'in-place' , 'out-of-place' + upgradeType: "in-place" + # Specify target rules schema for migration and upgrade + targetRulesSchema: "" diff --git a/charts/pega/values-medium-alldetails.yaml b/charts/pega/values-medium-alldetails.yaml new file mode 100644 index 000000000..28949f6cf --- /dev/null +++ b/charts/pega/values-medium-alldetails.yaml @@ -0,0 +1,206 @@ +--- +global: + # This values.yaml file is an example of all possible + # deployment configurations. For more information about + # each configuration option, see the project readme. + + # Enter your Kubernetes provider. + provider: "YOUR_KUBERNETES_PROVIDER" + + # Deploy Pega nodes + actions: + execute: "deploy" + + # Provide JDBC connection information to the Pega relational database + # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties. + jdbc: + # url Valid values are: + # + # Oracle jdbc:oracle:thin:@//localhost:1521/dbName + # IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName + # IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true; + # progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + # SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false + # PostgreSQL jdbc:postgresql://localhost:5432/dbName + url: "YOUR_JDBC_URL" + # driverClass -- jdbc class. 
Valid values are: + # + # Oracle oracle.jdbc.OracleDriver + # IBM DB/2 com.ibm.db2.jcc.DB2Driver + # SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver + # PostgreSQL org.postgresql.Driver + driverClass: "YOUR_JDBC_DRIVER_CLASS" + # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres + dbType: "YOUR_DATABASE_TYPE" + # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri' + driverUri: "YOUR_JDBC_DRIVER_URI" + username: "YOUR_JDBC_USERNAME" + password: "YOUR_JDBC_PASSWORD" + # CUSTOM CONNECTION PROPERTIES + # Add a list of ; delimited connections properties. The list must end with ; + # For example: connectionProperties=user=usr;password=pwd; + connectionProperties: "" + rulesSchema: "YOUR_RULES_SCHEMA" + dataSchema: "YOUR_DATA_SCHEMA" + customerDataSchema: "" + + # If using a custom Docker registry, supply the credentials here to pull Docker images. + docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" + # Docker image information for the Pega docker image, containing the application server. + pega: + image: "pegasystems/pega" + + # Upgrade specific properties + upgrade: + # Configure only for aks/pks + # Run "kubectl cluster-info" command to get the service host and https service port of kubernetes api server. + # Example - Kubernetes master is running at https://: + kube-apiserver: + serviceHost: "API_SERVICE_ADDRESS" + httpsServicePort: "SERVICE_PORT_HTTPS" + + # Specify the Pega tiers to deploy + tier: + - name: "web" + # Create a an interactive tier for web users. This tier uses + # the WebUser node type and will be exposed via a service to + # the load balancer. + nodeType: "WebUser" + + service: + # Enter the domain name to access web nodes via a load balancer. + # e.g. 
web.mypega.example.com + domain: "YOUR_WEB_NODE_DOMAIN" + port: 80 + targetPort: 8080 + # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors + alb_stickiness_lb_cookie_duration_seconds: 3660 + + replicas: 1 + cpuRequest: 200m + memRequest: "6Gi" + cpuLimit: 2 + memLimit: "8Gi" + javaOpts: "" + initialHeap: "4096m" + maxHeap: "7168m" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + minReplicas: 2 + maxReplicas: 5 + targetAverageMemoryUtilization: 185 + + - name: "batch" + # Create a background tier for batch processing. This tier uses + # a collection of background node types and will not be exposed to + # the load balancer. + nodeType: "BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX" + + replicas: 1 + cpuRequest: 200m + memRequest: "6Gi" + cpuLimit: 2 + memLimit: "8Gi" + javaOpts: "" + initialHeap: "4096m" + maxHeap: "7168m" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetAverageCPUUtilization: 700 + + - name: "stream" + # Create a stream tier for queue processing. This tier deploys + # as a stateful set to ensure durability of queued data. It may + # be optionally exposed to the load balancer. + nodeType: "Stream" + + service: + # Enter the domain name to access stream nodes via a load balancer. + # e.g. 
stream.mypega.example.com + domain: "YOUR_STREAM_NODE_DOMAIN" + port: 7003 + targetPort: 7003 + # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors + alb_stickiness_lb_cookie_duration_seconds: 3660 + + replicas: 2 + cpuRequest: 200m + memRequest: "6Gi" + cpuLimit: 2 + memLimit: "8Gi" + javaOpts: "" + initialHeap: "4096m" + maxHeap: "7168m" + + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + +# External services + +# Cassandra automatic deployment settings. +cassandra: + enabled: true + persistence: + enabled: true + resources: + requests: + memory: "4Gi" + cpu: 2 + limits: + memory: "8Gi" + cpu: 4 + +# DDS (external Cassandra) connection settings. +# These settings should only be modified if you are using a custom Cassandra deployment. +dds: + externalNodes: "" + port: "9042" + username: "dnode_ext" + password: "dnode_ext" + +# Elasticsearch deployment settings. +# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack. +# These search nodes will be deployed regardless of the Elasticsearch configuration above. +pegasearch: + image: "pegasystems/search" + memLimit: "3Gi" + +# Pega Installer settings +installer: + image: "YOUR_INSTALLER_IMAGE:TAG" + adminPassword: "ADMIN_PASSWORD" + # Upgrade specific properties + upgrade: + # Type of upgrade + # Valid values are 'in-place' , 'out-of-place' + upgradeType: "in-place" + # Specify target rules schema for migration and upgrade + targetRulesSchema: "" diff --git a/charts/pega/values-minimal.yaml b/charts/pega/values-minimal.yaml new file mode 100644 index 000000000..194c00ffd --- /dev/null +++ b/charts/pega/values-minimal.yaml @@ -0,0 +1,109 @@ +--- +global: + # This values.yaml file is an example of a minimal Pega + # deployment configuration. For more information about + # configuration options, see the project readme. + + # Enter your Kubernetes provider. 
+ provider: "k8s" + + # Deploy Pega nodes + actions: + execute: "deploy" + + # Provide JDBC connection information to the Pega relational database + # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties. + jdbc: + # url Valid values are: + # + # Oracle jdbc:oracle:thin:@//localhost:1521/dbName + # IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName + # IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true; + # progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + # SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false + # PostgreSQL jdbc:postgresql://localhost:5432/dbName + url: "YOUR_JDBC_URL" + # driverClass -- jdbc class. Valid values are: + # + # Oracle oracle.jdbc.OracleDriver + # IBM DB/2 com.ibm.db2.jcc.DB2Driver + # SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver + # PostgreSQL org.postgresql.Driver + driverClass: "YOUR_JDBC_DRIVER_CLASS" + # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres + dbType: "YOUR_DATABASE_TYPE" + # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri' + driverUri: "YOUR_JDBC_DRIVER_URI" + username: "YOUR_JDBC_USERNAME" + password: "YOUR_JDBC_PASSWORD" + # CUSTOM CONNECTION PROPERTIES + # Add a list of ; delimited connections properties. The list must end with ; + # For example: connectionProperties=user=usr;password=pwd; + connectionProperties: "" + rulesSchema: "YOUR_RULES_SCHEMA" + dataSchema: "YOUR_DATA_SCHEMA" + customerDataSchema: "" + + # If using a custom Docker registry, supply the credentials here to pull Docker images. 
+ docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" + # Docker image information for the Pega docker image, containing the application server. + pega: + image: "pegasystems/pega" + + # Specify the Pega tiers to deploy + # For a minimal deployment, use a single tier to reduce resource consumption. + tier: + - name: "minikube" + nodeType: "Stream,BackgroundProcessing,WebUser,Search" + service: + # Enter the domain name to access web nodes via a load balancer. + # e.g. web.mypega.example.com + domain: "YOUR_MINIKUBE_NODE_DOMAIN" + port: 80 + targetPort: 8080 + # Set resource consumption to minimal levels + replicas: 1 + cpuRequest: 200m + memRequest: "5Gi" + cpuLimit: 2 + memLimit: "6Gi" + javaOpts: "" + initialHeap: "4096m" + maxHeap: "4096m" + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + +# External services + +# Cassandra automatic deployment settings. +# Disabled by default for minimal deployments. +cassandra: + enabled: false + +# DDS (external Cassandra) connection settings. +# These settings should only be modified if you are using a custom Cassandra deployment. +dds: + externalNodes: "" + port: "9042" + username: "dnode_ext" + password: "dnode_ext" + +# Elasticsearch deployment settings. +# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack. +# These search nodes will be deployed regardless of the Elasticsearch configuration above. +pegasearch: + image: "pegasystems/search" + memLimit: "3Gi" + +# Pega Installer settings +installer: + image: "YOUR_INSTALLER_IMAGE:TAG" + adminPassword: "ADMIN_PASSWORD" diff --git a/charts/pega/values.yaml b/charts/pega/values.yaml index 1f1579285..fe209b535 100644 --- a/charts/pega/values.yaml +++ b/charts/pega/values.yaml @@ -1,189 +1,156 @@ --- -# Enter your Kubernetes provider. 
Replace "YOUR_KUBERNETES_PROVIDER" with one of the following values: -# k8s - for a deployment using open-source Kubernetes -# openshift - for a deployment using Red Hat Openshift -# eks - for a deployment using Amazon EKS -# gke - for a deployment using Google Kubernetes Engine -# pks - for a deployment using Pivotal Container Service -# aks - for a deployment using Azure Kubernetes Service -provider: "YOUR_KUBERNETES_PROVIDER" - -# Configure Traefik for load balancing: -# If enabled: true, Traefik is deployed automatically. -# If enabled: false, Traefik is not deployed and load balancing must be configured manually. -# Pega recommends enabling Traefik on providers other than Openshift. -# On Openshift, Traefik is ignored and Pega uses Openshift's built-in load balancer. -traefik: - enabled: true - # Set any additional Traefik parameters. These values will be used by Traefik's helm chart. - # See https://github.com/helm/charts/blob/master/stable/traefik/values.yaml - # Set traefik.serviceType to "LoadBalancer" on gke, aks, and pks - serviceType: NodePort - # If enabled is set to "true", ssl will be enabled for traefik - ssl: - enabled: false - rbac: - enabled: true - service: - nodePorts: - # NodePorts for traefik service. - http: 30080 - https: 30443 - -# Docker image information for the Pega docker image, containing the application server. -# To use this feature you MUST host the image using a private registry. -# See https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry -# Note: the imagePullPolicy is always for all images in this deployment, so pre-pulling images will not work. -docker: - image: "YOUR_PEGA_IMAGE:TAG" - registry: - url: "YOUR_DOCKER_REGISTRY" - # Provide your Docker registry username and password to access the docker image. These credentials will be - # used for both the Pega Platform image and the Elasticsearch image. 
- username: "YOUR_DOCKER_REGISTRY_USERNAME" - password: "YOUR_DOCKER_REGISTRY_PASSWORD" - -# JDBC information to connect to the Pega database. -# Pega must be installed to this database before deploying on Kubernetes. -# -# Examples for jdbc url and driver class: -# For Oracle: -# url: jdbc:oracle:thin:@//YOUR_DB_HOST:1521/YOUR_DB_NAME -# driverClass: oracle.jdbc.OracleDriver -# For Microsoft SQL Server: -# url: jdbc:sqlserver://YOUR_DB_HOST:1433;databaseName=YOUR_DB_NAME;selectMethod=cursor;sendStringParametersAsUnicode=false -# driverClass: com.microsoft.sqlserver.jdbc.SQLServerDriver -# For IBM DB2 for LUW: -# url: jdbc:db2://YOUR_DB_HOST:50000/YOUR_DB_NAME:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true;progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; -# driverClass: com.ibm.db2.jcc.DB2Driver -# For IBM DB2 for z/OS: -# url: jdbc:db2://YOUR_DB_HOST:50000/YOUR_DB_NAME -# driverClass: com.ibm.db2.jcc.DB2Driver -# For PostgreSQL: -# url: jdbc:postgresql://YOUR_DB_HOST:5432/YOUR_DB_NAME -# driverClass: org.postgresql.Driver -jdbc: - url: "YOUR_JDBC_URL" - driverClass: "YOUR_JDBC_DRIVER_CLASS" - # Set the uri to download the database driver for your database. - driverUri: "YOUR_JDBC_DRIVER_URI" - # Set your database username and password. These values will be obfuscated and stored in a secrets file. - username: "YOUR_JDBC_USERNAME" - password: "YOUR_JDBC_PASSWORD" - # Set the rules and data schemas for your database. Additional schemas can be defined within Pega. - rulesSchema: "YOUR_RULES_SCHEMA" - dataSchema: "YOUR_DATA_SCHEMA" - # If configured, set the customerdata schema for your database. Defaults to value of dataSchema if not provided. - customerDataSchema: - -# Pega web deployment settings. -web: - # Enter the domain name to access web nodes via a load balancer. - # e.g. web.mypega.example.com - domain: "YOUR_WEB_NODE_DOMAIN" - # Enter the number of web nodes for Kubernetes to deploy (minimum 1). 
- replicas: 1 - # Enter the CPU limit for each web node (recommended 2). - cpuLimit: 2 - # Enter the memory limit for each web node (recommended 8Gi). - memLimit: "8Gi" - # Enter any additional java options. - javaOpts: "" - # Initial heap size for the jvm. - initialHeap: "2048m" - # Maximum heap size for the jvm. - maxHeap: "7168m" - # Enter the location of the prconfig.xml and prlog4j2.xml configuration files. - # The file paths are relative to this project's root directory. - # Pega includes default files, but supports overriding these files with customized prconfig and prlog4j2 files. - prconfigPath: "config/prconfig.xml" - prlog4j2Path: "config/prlog4j2.xml" - # Set your Pega diagnostic credentials. - pegaDiagnosticUser: "" - pegaDiagnosticPassword: "" - hpa: - enabled: true - # Enter the minimum number of replicas that HPA can scale-down - minReplicas: 1 - # Enter the maximum number of replicas that HPA can scale-up - maxReplicas: 5 - # Enter the threshold value for average cpu utilization percentage. This value is calculated on resource request. - # Default value is set at 70% of web pod cpu limit. Pega web pod default cpu request is 200m & limit is 2c. 70% of limit or 700% request which is 1.4c. - # HPA will scale up if pega web pods average cpu utilization reaches 1.4c - targetAverageCPUUtilization: 700 - # Enter the threshold value for average memory utilization percentage. This value is calculated on resource request. - # Default value is set at 70% of web pod memory limit. Pega web pod default memory request is 2Gi & limit is 8Gi. 70% of limit or 280% request which is 5.6Gi. - # HPA will scale up if pega web pods average memory utilization reaches 5.6Gi - targetAverageMemoryUtilization: 280 - -# Pega stream deployment settings. -stream: - # Enter the domain name to access stream nodes via a load balancer. - # e.g. stream.mypega.example.com - domain: "YOUR_STREAM_NODE_DOMAIN" - # Enter the number of stream nodes for Kubernetes to deploy (minimum 2). 
- replicas: 2 - # Enter the CPU limit for each stream node (recommended 2). - cpuLimit: 2 - # Enter the memory limit for each stream node (recommended 8Gi). - memLimit: "8Gi" - # Enter any additional java options - javaOpts: "" - # Initial heap size for the jvm - initialHeap: "2048m" - # Maximum heap size for the jvm - maxHeap: "7168m" - # Enter the location of the prconfig.xml and prlog4j2.xml configuration files. - # The file paths are relative to this project's root directory. - # Pega includes default files, but supports overriding these files with customized prconfig and prlog4j2 files. - prconfigPath: "config/prconfig.xml" - prlog4j2Path: "config/prlog4j2.xml" - -# Pega batch deployment settings. -batch: - # Enter the number of batch nodes for Kubernetes to deploy (minimum 1). - replicas: 1 - # Enter the CPU limit for each batch node (recommended 2). - cpuLimit: 2 - # Enter the memory limit for each batch node (recommended 8Gi). - memLimit: "8Gi" - # Enter any additional java options. - javaOpts: "" - # Initial heap size for the jvm. - initialHeap: "2048m" - # Maximum heap size for the jvm. - maxHeap: "7168m" - # Enter the location of the prconfig.xml and prlog4j2.xml configuration files. - # The file paths are relative to this project's root directory. - # Pega includes default files, but supports overriding these files with customized prconfig and prlog4j2 files. - prconfigPath: "config/prconfig.xml" - prlog4j2Path: "config/prlog4j2.xml" - hpa: - enabled: true - # Enter the minimum number of replicas that HPA can scale-down - minReplicas: 1 - # Enter the maximum number of replicas that HPA can scale-up - maxReplicas: 3 - # Enter the threshold value for average cpu utilization percentage. This value is calculated on resource request. - # Default value is set at 70% of batch pod cpu limit. Pega batch pod default cpu request is 200m & limit is 2c. 70% of limit or 700% request which is 1.4c. 
- # HPA will scale up if pega batch pods average cpu utilization reaches 1.4c - targetAverageCPUUtilization: 700 - # Enter the threshold value for average memory utilization percentage. This value is calculated on resource request. - # Default value is set at 70% of batch pod memory limit. Pega batch pod default memory request is 2Gi & limit is 8Gi. 70% of limit or 280% request which is 5.6Gi. - # HPA will scale up if pega batch pods average memory utilization reaches 5.6Gi - targetAverageMemoryUtilization: 280 +global: + # This values.yaml file is an example. For more information about + # each configuration option, see the project readme. + + # Enter your Kubernetes provider. + provider: "YOUR_KUBERNETES_PROVIDER" + + # Deploy Pega nodes + actions: + execute: "deploy" + + # Provide JDBC connection information to the Pega relational database + # If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties. + jdbc: + # url Valid values are: + # + # Oracle jdbc:oracle:thin:@//localhost:1521/dbName + # IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName + # IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true; + # progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2; + # SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false + # PostgreSQL jdbc:postgresql://localhost:5432/dbName + url: "YOUR_JDBC_URL" + # driverClass -- jdbc class. 
Valid values are: + # + # Oracle oracle.jdbc.OracleDriver + # IBM DB/2 com.ibm.db2.jcc.DB2Driver + # SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver + # PostgreSQL org.postgresql.Driver + driverClass: "YOUR_JDBC_DRIVER_CLASS" + # pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres + dbType: "YOUR_DATABASE_TYPE" + # For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri' + driverUri: "YOUR_JDBC_DRIVER_URI" + username: "YOUR_JDBC_USERNAME" + password: "YOUR_JDBC_PASSWORD" + # CUSTOM CONNECTION PROPERTIES + # Add a list of ; delimited connections properties. The list must end with ; + # For example: connectionProperties=user=usr;password=pwd; + connectionProperties: "" + rulesSchema: "YOUR_RULES_SCHEMA" + dataSchema: "YOUR_DATA_SCHEMA" + customerDataSchema: "" + + # If using a custom Docker registry, supply the credentials here to pull Docker images. + docker: + registry: + url: "YOUR_DOCKER_REGISTRY" + username: "YOUR_DOCKER_REGISTRY_USERNAME" + password: "YOUR_DOCKER_REGISTRY_PASSWORD" + # Docker image information for the Pega docker image, containing the application server. + pega: + image: "pegasystems/pega" + + # Upgrade specific properties + upgrade: + # Configure only for aks/pks + # Run "kubectl cluster-info" command to get the service host and https service port of kubernetes api server. + # Example - Kubernetes master is running at https://: + kube-apiserver: + serviceHost: "API_SERVICE_ADDRESS" + httpsServicePort: "SERVICE_PORT_HTTPS" + + # Specify the Pega tiers to deploy + tier: + - name: "web" + # Create a an interactive tier for web users. This tier uses + # the WebUser node type and will be exposed via a service to + # the load balancer. + nodeType: "WebUser" + + service: + # Enter the domain name to access web nodes via a load balancer. + # e.g. 
web.mypega.example.com + domain: "YOUR_WEB_NODE_DOMAIN" + port: 80 + targetPort: 8080 + # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors + alb_stickiness_lb_cookie_duration_seconds: 3660 + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + + - name: "batch" + # Create a background tier for batch processing. This tier uses + # a collection of background node types and will not be exposed to + # the load balancer. + nodeType: "BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX" + + replicas: 1 + javaOpts: "" + + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + + deploymentStrategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + + hpa: + enabled: true + + - name: "stream" + # Create a stream tier for queue processing. This tier deploys + # as a stateful set to ensure durability of queued data. It may + # be optionally exposed to the load balancer. + nodeType: "Stream" + + service: + # Enter the domain name to access stream nodes via a load balancer. + # e.g. stream.mypega.example.com + domain: "YOUR_STREAM_NODE_DOMAIN" + port: 7003 + targetPort: 7003 + # When provider is eks, configure alb cookie duration seconds equal to passivation time of requestors + alb_stickiness_lb_cookie_duration_seconds: 3660 + + replicas: 2 + cpuRequest: 200m + memRequest: "6Gi" + cpuLimit: 2 + memLimit: "8Gi" + javaOpts: "" + initialHeap: "4096m" + maxHeap: "7168m" + + volumeClaimTemplate: + resources: + requests: + storage: 5Gi + +# External services # Cassandra automatic deployment settings. cassandra: - # Set cassandra.enabled to true to automatically deploy the Cassandra sub-chart. - # Set to false if dds.externalNodes is set, or if you do not need Cassandra in your Pega environment. 
enabled: true - # Set any additional Cassandra parameters. These values will be used by Cassandra's helm chart. - # See https://github.com/helm/charts/blob/master/incubator/cassandra/values.yaml persistence: enabled: true - ## Minimum memory for development is 4GB and 2 CPU cores - ## Minimum memory for production is 8GB and 4 CPU cores resources: requests: memory: "4Gi" @@ -195,11 +162,7 @@ cassandra: # DDS (external Cassandra) connection settings. # These settings should only be modified if you are using a custom Cassandra deployment. dds: - # Enter an external node to use a custom external Cassandra deployment. If cassandra.enabled is set to true, leave dds.externalNodes blank. - # If using an external node, cassandra.enabled should be set to false. - # If dds.externalNodes is set and cassandra.enabled is set to true, Pega will connect to Cassandra using dds.externalNodes. externalNodes: "" - # The port, username, and password should only be modified if supplying a custom external Cassandra node. port: "9042" username: "dnode_ext" password: "dnode_ext" @@ -207,91 +170,18 @@ dds: # Elasticsearch deployment settings. # Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack. # These search nodes will be deployed regardless of the Elasticsearch configuration above. -search: - # Enter the number of search nodes for Kubernetes to deploy (minimum 1). - replicas: 1 - # If externalURL is set, no search nodes will be deployed automatically, and Pega will use this search node url. - externalURL: "" - # Enter the docker image used to deploy Elasticsearch. This value will be ignored if using an external url. - # Push the Elasticsearch image to your internal docker registry. This must be the same registry as the docker section above. - image: "YOUR_ELASTICSEARCH_IMAGE:TAG" - # Enter the CPU limit for each search node (recommended 1). 
- cpuLimit: 1 - # Enter the Memory limit for each search node (recommended 4Gi). - memLimit: "4Gi" - # Enter the volume size limit for each search node (recommended 5Gi). - volumeSize: "5Gi" - - -# Configure EFK stack for logging: -# For a complete EFK stack: elasticsearch, fluentd-elasticsearch, and kibana should all be enabled -# Pega recommends deploying EFK only on k8s -# On Openshift, see https://docs.openshift.com/container-platform/3.11/install_config/aggregate_logging.html -# On EKS, see https://eksworkshop.com/logging/ - -# Replace false with true to deploy EFK. -# Do not remove &deploy_efk; it is a yaml anchor which is referenced by the EFK subcharts. -deploy_efk: &deploy_efk false - -elasticsearch: - enabled: *deploy_efk - # Set any additional elastic search parameters. These values will be used by elasticsearch helm chart. - # See https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml - # - # If you need to change this value then you will also need to replace the same - # part of the value within the following properties further below: - # - # kibana.files.kibana.yml.elasticsearch.url - # fluentd-elasticsearch.elasticsearch.host - # - fullnameOverride: "elastic-search" - -kibana: - enabled: *deploy_efk - # Set any additional kibana parameters. These values will be used by Kibana's helm chart. - # See https://github.com/helm/charts/tree/master/stable/kibana/values.yaml - files: - kibana.yml: - elasticsearch.url: http://elastic-search-client:9200 - service: - externalPort: 80 - ingress: - # If enabled is set to "true", an ingress is created to access kibana. - enabled: true - # Enter the domain name to access kibana via a load balancer. - hosts: - - "YOUR_WEB.KIBANA.EXAMPLE.COM" - -fluentd-elasticsearch: - enabled: *deploy_efk - # Set any additional fluentd-elasticsearch parameters. These values will be used by fluentd-elasticsearch's helm chart. 
- # See https://github.com/helm/charts/tree/master/stable/fluentd-elasticsearch/values.yaml - elasticsearch: - host: elastic-search-client - buffer_chunk_limit: 250M - buffer_queue_limit: 30 - -metrics-server: - # Set this to true to install metrics-server. Follow below guidelines specific - # to each provider, open-source Kubernetes, Openshift & EKS - mandatory to - # set this to true if web.hpa.enabled or batch.hpa.enabled is true GKE or - # AKS - set this to false since metrics-server is installed in the cluster - # by default. - enabled: true - # Set any additional metrics-server parameters. These values will be used by - # metrics-server's helm chart. - # See https://github.com/helm/charts/blob/master/stable/metrics-server/values.yaml - args: - - --logtostderr - # The order in which to consider different Kubelet node address types when - # connecting to Kubelet. Uncomment below arguemnt if host names are not - # resolvable from metrics server pod. This setting is not required for - # public cloud providers & openshift enterprise. It may be required for - # open-source Kubernetes. - # - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP - # Uncomment below arguemnt to skip verifying Kubelet CA certificates. - # Not recommended for production usage, but can be useful in test clusters - # with self-signed Kubelet serving certificates. This setting is not - # required for public cloud providers & openshift enterprise. It may be - # required for open-source Kubernetes. 
- # - --kubelet-insecure-tls +pegasearch: + image: "pegasystems/search" + memLimit: "3Gi" + +# Pega Installer settings +installer: + image: "YOUR_INSTALLER_IMAGE:TAG" + adminPassword: "ADMIN_PASSWORD" + # Upgrade specific properties + upgrade: + # Type of upgrade + # Valid values are 'in-place' , 'out-of-place' + upgradeType: "in-place" + # Specify target rules schema for migration and upgrade + targetRulesSchema: "" diff --git a/descriptor-template.json b/descriptor-template.json new file mode 100644 index 000000000..4c2fb1e08 --- /dev/null +++ b/descriptor-template.json @@ -0,0 +1,33 @@ +{ + "package": { + "name": "pega-helm-charts", + "repo": "pega-helm-charts", + "subject": "pegasystems", + "vcs_url": "https://github.com/pegasystems/pega-helm-charts", + "licenses": ["Apache License 2.0"] + }, + + "version": { + "name": "1.0.0" + }, + + "files": + [ + {"includePattern": "${PEGA_FILE_NAME}" , "uploadPattern": "${PEGA_FILE_NAME}", + "matrixParams": { + "override" : 1 + } + }, + {"includePattern": "${ADDONS_FILE_NAME}" , "uploadPattern": "${ADDONS_FILE_NAME}", + "matrixParams": { + "override" : 1 + } + }, + {"includePattern": "index.yaml", "uploadPattern": "index.yaml", + "matrixParams": { + "override" : 1 + } + } + ], + "publish": true +} \ No newline at end of file diff --git a/docs/RUNBOOK_MINIKUBE.md b/docs/RUNBOOK_MINIKUBE.md new file mode 100644 index 000000000..84d87aecc --- /dev/null +++ b/docs/RUNBOOK_MINIKUBE.md @@ -0,0 +1,69 @@ +# Minikube + +Minikube runs a single-node Kubernetes cluster inside a Virtual Machine (VM) on your laptop for users looking to try out Kubernetes or develop with it day-to-day. For more information on minikube, see the [Minikube documentation](https://kubernetes.io/docs/setup/learning-environment/minikube/). + + +This document explains on how to deploy pega using minikube as a provider. + +# Quick Start + +1. For installing minikube - https://kubernetes.io/docs/tasks/tools/install-minikube/ +2. 
Minikube Documentation - https://minikube.sigs.k8s.io/docs/overview/
+
+# Basic Commands for Minikube
+
+- Start a cluster by running:
+  ```minikube start```
+
+- Access the Kubernetes Dashboard running within the minikube cluster:
+  ```minikube dashboard```
+
+- Stop your local minikube cluster:
+  ```minikube stop```
+
+- Delete your local cluster:
+```minikube delete```
+
+- To start minikube with a different version of Kubernetes:
+```minikube start --kubernetes-version v1.15.0```
+
+
+# FAQs
+
+1. How to increase the memory limit of a running minikube
+
+   There is no direct way to increase the memory limit of a running minikube.
+
+   ``` minikube stop```
+
+   ```minikube delete ```
+
+   ```minikube start --cpus 4 --memory 12288 ```
+
+2. How to start minikube with custom CPU/memory limits
+
+   ```minikube start --cpus 4 --memory 10240```
+
+3. How to set the default memory that is used on each minikube start
+
+   ```minikube config set memory 5000``` followed by ```minikube start```
+
+4. How to access Pega Designer Studio after deployment
+
+   ``` :/prweb```
+
+The minikube IP can be fetched using the command ``` minikube ip``` and the Pega service NodePort can be fetched using the command below
+```kubectl get service -o go-template='{{range.spec.ports}}{{"Port to access: "}}{{.nodePort}}{{end}}' --namespace ```
+
+***Recommended Memory Limits***
+
+Start minikube with at least 4 CPUs and 10GB memory for a complete Pega deployment. Increase the minikube limits as needed.
+
+***Note***
+1. Use "values-minimal.yaml" to deploy Pega, which is available in the [pega chart](../charts/pega) directory.
+
+   Example helm command to deploy
+
+   ```helm install . -n mypega --namespace myproject --values ./values-minimal.yaml```
+
+2. As this runs on a personal laptop for day-to-day projects with minimal memory and CPU limits, minikube supports only the "install", "deploy" and "install-deploy" actions. 
It is advisable to use this kind of cluster configuration for simple activities on Pega as it might spike with CPU and memory. diff --git a/terratest/src/test/Gopkg.lock b/terratest/src/test/Gopkg.lock new file mode 100644 index 000000000..5a653d6c2 --- /dev/null +++ b/terratest/src/test/Gopkg.lock @@ -0,0 +1,695 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:fd319661e53f3607b8ddbded6121f8e4fe42978cb66968b20fdee68e10d10f9f" + name = "cloud.google.com/go" + packages = ["compute/metadata"] + pruneopts = "" + revision = "264def2dd949cdb8a803bb9f50fa29a67b798a6a" + version = "v0.46.3" + +[[projects]] + digest = "1:22638c997ebbea4c106a0ea7457f62395cfe6be66fad93c1c98a79f985655e07" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/crr", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/s3err", + "internal/sdkio", + "internal/sdkmath", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/ec2query", + "private/protocol/eventstream", + "private/protocol/eventstream/eventstreamapi", + "private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/restxml", + "private/protocol/xml/xmlutil", + "service/acm", + "service/autoscaling", + "service/cloudwatchlogs", + "service/dynamodb", + "service/ec2", + "service/ecs", + "service/iam", + "service/kms", + "service/rds", + "service/s3", + "service/s3/s3iface", + "service/s3/s3manager", + "service/sns", + "service/sqs", 
+ "service/ssm", + "service/sts", + "service/sts/stsiface", + ] + pruneopts = "" + revision = "8ed263425377bfd1b72b611e1769d39bad41b3ea" + version = "v1.25.1" + +[[projects]] + digest = "1:b529f4bf748979caa18b599d40d13e8b6e591a74b340f315ce4f95e119c288c2" + name = "github.com/boombuler/barcode" + packages = [ + ".", + "qr", + "utils", + ] + pruneopts = "" + revision = "3cfea5ab600ae37946be2b763b8ec2c1cf2d272d" + version = "v1.0.0" + +[[projects]] + digest = "1:141635a36d65611d06a05ec1d17be950e386426b6540169fe7c9476df41f6493" + name = "github.com/cpuguy83/go-md2man" + packages = ["md2man"] + pruneopts = "" + revision = "7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19" + version = "v1.0.10" + +[[projects]] + digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:d6c13a378213e3de60445e49084b8a0a9ce582776dfc77927775dbeb3ff72a35" + name = "github.com/docker/spdystream" + packages = [ + ".", + "spdy", + ] + pruneopts = "" + revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" + +[[projects]] + digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:26317724ed32bcf2ef15454613d2a8fe9d670b12f073cfd20db3bcec54e069ab" + name = "github.com/go-errors/errors" + packages = ["."] + pruneopts = "" + revision = "d98b870cc4e05f1545532a80e9909be8216095b6" + +[[projects]] + digest = "1:e692d16fdfbddb94e9e4886aaf6c08bdbae5cb4ac80651445de9181b371c6e46" + name = "github.com/go-sql-driver/mysql" + packages = ["."] + pruneopts = "" + revision = "72cd26f257d44c1114970e19afddcd812016007e" + version = "v1.4.1" + +[[projects]] + digest = 
"1:8a7fe65e9ac2612c4df602cc9f014a92406776d993ff0f28335e5a8831d87c53" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "" + revision = "0ca988a254f991240804bf9821f3450d87ccbb1b" + version = "v1.3.0" + +[[projects]] + branch = "master" + digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + digest = "1:b852d2b62be24e445fcdbad9ce3015b44c207815d631230dfce3f14e7803f5bf" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + version = "v1.0.0" + +[[projects]] + digest = "1:8d4a577a9643f713c25a32151c0f26af7228b4b97a219b5ddb7fd38d16f6e673" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "" + revision = "f140a6486e521aad38f5917de355cbf147cc0496" + version = "v1.0.0" + +[[projects]] + digest = "1:c1d7e883c50a26ea34019320d8ae40fad86c9e5d56e63a1ba2cb618cef43e986" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "" + revision = "064e2069ce9c359c118179501254f67d7d37ba24" + version = "0.2" + +[[projects]] + digest = "1:728f28282e0edc47e2d8f41c9ec1956ad645ad6b15e6376ab31e2c3b094fc38f" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "" + revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab" + version = "v0.3.1" + +[[projects]] + branch = "master" + digest = "1:e1fd67b5695fb12f54f979606c5d650a5aa72ef242f8e71072bfd4f7b5a141a0" + name = "github.com/gregjones/httpcache" + 
packages = [ + ".", + "diskcache", + ] + pruneopts = "" + revision = "901d90724c7919163f472a9812253fb26761123d" + +[[projects]] + digest = "1:fece294a325eddd1e104aa365d1d90dee1dceafec92cd2727a83c44d0bee5ec3" + name = "github.com/gruntwork-io/gruntwork-cli" + packages = [ + "collections", + "errors", + ] + pruneopts = "" + revision = "2b40fc3e3a9c0987119998f8ddc486e72a3e303c" + version = "v0.5.1" + +[[projects]] + digest = "1:b73e192a8b22fc5fea78c4fa3ac6528db798d0803edc68bc962e914fe1164287" + name = "github.com/gruntwork-io/terratest" + packages = [ + "modules/aws", + "modules/collections", + "modules/customerrors", + "modules/environment", + "modules/files", + "modules/helm", + "modules/k8s", + "modules/logger", + "modules/random", + "modules/retry", + "modules/shell", + "modules/ssh", + ] + pruneopts = "" + revision = "495f4a90acde629ffcac17f75d308c8273c34646" + version = "v0.18.6" + +[[projects]] + digest = "1:31bfd110d31505e9ffbc9478e31773bf05bf02adcaeb9b139af42684f9294c13" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:13fe471d0ed891e8544eddfeeb0471fd3c9f2015609a1c000aefdedf52a19d40" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "" + revision = "c2b33e84" + +[[projects]] + digest = "1:e716a02584d94519e2ccf7ac461c4028da736d41a58c1ed95e641c1603bdb056" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "" + revision = "27518f6661eba504be5a7a9a9f6d9460d892ade3" + version = "v1.1.7" + +[[projects]] + digest = "1:6dbb0eb72090871f2e58d1e37973fe3cb8c0f45f49459398d3fc740cb30e13bd" + name = "github.com/mitchellh/go-homedir" + packages = 
["."] + pruneopts = "" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + branch = "master" + digest = "1:5f0faa008e8ff4221b55a1a5057c8b02cb2fd68da6a65c9e31c82b72cbc836d0" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "" + revision = "33fb24c13b99c46c93183c291836c573ac382536" + +[[projects]] + digest = "1:4709c61d984ef9ba99b037b047546d8a576ae984fb49486e48d99658aa750cd5" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "" + revision = "0be1b92a6df0e4f5cb0a5d15fb7f643d0ad93ce6" + version = "v3.0.0" + +[[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:09d0eed1a0e502dfff8227c3fdbe022ea4bd722c1db3daf7251f20cfc549b428" + name = "github.com/pquerna/otp" + packages = [ + ".", + "hotp", + "totp", + ] + pruneopts = "" + revision = "43bebefda392017900e7a7b237b4c914c6a55b50" + version = "v1.2.0" + +[[projects]] + digest = "1:2761e287c811d0948d47d0252b82281eca3801eb3c9d5f9530956643d5b9f430" + name = "github.com/russross/blackfriday" + packages = ["."] + pruneopts = "" + revision = "05f3235734ad95d0016f6a23902f06461fcf567a" + version = "v1.5.2" + +[[projects]] + digest = "1:0c63b3c7ad6d825a898f28cb854252a3b29d37700c68a117a977263f5ec94efe" + name = 
"github.com/spf13/cobra" + packages = ["."] + pruneopts = "" + revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5" + version = "v0.0.5" + +[[projects]] + digest = "1:688428eeb1ca80d92599eb3254bdf91b51d7e232fead3a73844c1f201a281e51" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "" + revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab" + version = "v1.0.5" + +[[projects]] + digest = "1:f7b541897bcde05a04a044c342ddc7425aab7e331f37b47fbb486cd16324b48e" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:c6b7dd6b1f602f71a9522d03a65150ace6550839b71c543efa5686d56686c913" + name = "github.com/urfave/cli" + packages = ["."] + pruneopts = "" + revision = "bfe2e925cfb6d44b40ad3a779165ea7e8aff9212" + version = "v1.22.0" + +[[projects]] + branch = "master" + digest = "1:715598815cb13b257c4e32153bdb11f899c82d0cd8c15831cca28e139ddf1b33" + name = "golang.org/x/crypto" + packages = [ + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "internal/chacha20", + "internal/subtle", + "poly1305", + "ssh", + "ssh/agent", + "ssh/terminal", + ] + pruneopts = "" + revision = "a832865fa7ada6126f4c6124ac49f71be71bff2a" + +[[projects]] + branch = "master" + digest = "1:60eb80cc714e54c1179fca3454bd450ca5c1f9e1a0773266b9e0e6f3e7853a07" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] + pruneopts = "" + revision = "c00fd9afed17cfdca9b3e1e3b8de7ef2b3f0347b" + +[[projects]] + branch = "master" + digest = "1:01bdbbc604dcd5afb6f66a717f69ad45e9643c72d5bc11678d44ffa5c50f9e42" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "" + revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" + +[[projects]] + branch = "master" + digest = 
"1:02a5110b0c8cc83edf31f25794d8486cb57358f2b84250baa4235973f48124fb" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix", + "windows", + ] + pruneopts = "" + revision = "c990c680b611ac1aeb7d8f2af94a825f98d69720" + +[[projects]] + digest = "1:740b51a55815493a8d0f2b1e0d0ae48fe48953bf7eaf3fcc4198823bf67768c0" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:1290297b5048f051e77948812e93c4f09a914d02c8fb02cf2dfdaf23d73a5805" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "" + revision = "c4c64cad1fd0a1a8dab2523e04e61d35308e131e" + +[[projects]] + digest = "1:702a3d272e930e37f4978392f69473eede3f5eea31a345c129085f899580c2cb" + name = "google.golang.org/appengine" + packages = [ + ".", + "cloudsql", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "" + revision = "bddb1f54ecfd64b1350fd2dd897fa11d426632bc" + version = "v1.6.4" + +[[projects]] + digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:cedccf16b71e86db87a24f8d4c70b0a855872eb967cb906a66b95de56aefbd0d" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = 
"1:bd9c8b7155b31a5b8f420169a470d47aae7f5c9e4e6c5c18bb15bcdbd1c51bfd" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "" + revision = "a33c8200050fc0751848276811abf3fc029b3133" + +[[projects]] + branch = "release-1.12" + digest = "1:131e8df2ac1479ca72a8141bc1abf340b63fadb781e3f5a45c1f857691967711" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/clock", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/httpstream", + "pkg/util/httpstream/spdy", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/netutil", + "third_party/forked/golang/reflect", + 
] + pruneopts = "" + revision = "6f131bee5e2ccfaf827e56866022a46d8b864d03" + +[[projects]] + branch = "release-9.0" + digest = "1:01c37e7c90b3fa8d86ab6759d9e1c9ba74fd68bff44a83a99a55d847df3231d7" + name = "k8s.io/client-go" + packages = [ + "discovery", + "kubernetes", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/core/v1", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/networking/v1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", + "rest", + "rest/watch", + "third_party/forked/golang/template", + "tools/auth", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/portforward", + "tools/reference", + "transport", + "transport/spdy", + "util/cert", + 
"util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/jsonpath", + ] + pruneopts = "" + revision = "386e588352a49a5c8dc7632348278569d4f57419" + +[[projects]] + digest = "1:7984928b646fbf09508938db5e464ec828cc3ae8afb75f8ac99cdaad658da418" + name = "k8s.io/kubernetes" + packages = [ + "pkg/apis/autoscaling", + "pkg/apis/core", + "pkg/kubectl/generate", + ] + pruneopts = "" + revision = "67d2fcf276fcd9cf743ad4be9a9ef5828adc082f" + version = "v1.15.4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/gruntwork-io/terratest/modules/helm", + "github.com/stretchr/testify/require", + "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta2", + "k8s.io/api/batch/v1", + "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/rbac/v1", + "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/kubernetes/pkg/apis/autoscaling", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/terratest/src/test/Gopkg.toml b/terratest/src/test/Gopkg.toml new file mode 100644 index 000000000..f48e4ab79 --- /dev/null +++ b/terratest/src/test/Gopkg.toml @@ -0,0 +1,7 @@ +[[constraint]] + name = "github.com/gruntwork-io/terratest" + version = "v0.18.2" + +[[override]] + name = "github.com/russross/blackfriday" + version = "1.5.2" \ No newline at end of file diff --git a/terratest/src/test/aks_deploy_test.go b/terratest/src/test/aks_deploy_test.go new file mode 100644 index 000000000..fb2a7778b --- /dev/null +++ b/terratest/src/test/aks_deploy_test.go @@ -0,0 +1,69 @@ +package test + +import ( + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" +) + +const PegaHelmChartPath = "../../../charts/pega" + +// set action execute to install +var options = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "deploy", + "global.provider": "aks", + }, +} + +// TestPegaStandardTierDeployment - Test case to verify the standard 
pega tier deployment. +// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func TestPegaAKSStandardTierDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, options, []string{"wait-for-pegasearch", "wait-for-cassandra"}) +} + +// set action execute to install +var installDeployoptions = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "install-deploy", + "global.provider": "aks", + }, +} + +// TestPegaAKSInstallDeployDeployment - Test case to verify the standard pega tier deployment. +// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func TestPegaAKSInstallDeployDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, installDeployoptions, []string{"wait-for-pegainstall", "wait-for-pegasearch", "wait-for-cassandra"}) +} + +// set action execute to install +var upgradeDeployOptions = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "upgrade-deploy", + "global.provider": "aks", + }, +} + +// TestPegaAKSUpgradeDeployDeployment - Test case to verify the upgrade-deploy on AKS provider. 
+// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +// Special case in AKS during rolling restart to verify environments variables that are specific to AKS cluster - aksSpecificUpgraderDeployEnvs() method +func TestPegaAKSUpgradeDeployDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, upgradeDeployOptions, []string{"wait-for-pegaupgrade"}) +} diff --git a/terratest/src/test/common_utility.go b/terratest/src/test/common_utility.go new file mode 100644 index 000000000..a5349b02e --- /dev/null +++ b/terratest/src/test/common_utility.go @@ -0,0 +1,102 @@ +package test + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + k8score "k8s.io/api/core/v1" +) + +var volumeDefaultMode int32 = 420 +var volumeDefaultModePtr = &volumeDefaultMode + +// VerifyCredentialsSecret - Verifies the credential secret deployed with the values as provided in default values.yaml +func VerifyCredentialsSecret(t *testing.T, helmChartPath string, options *helm.Options) { + + secretOutput := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-credentials-secret.yaml"}) + var secretobj k8score.Secret + helm.UnmarshalK8SYaml(t, secretOutput, &secretobj) + secretData := secretobj.Data + require.Equal(t, string(secretData["DB_USERNAME"]), "YOUR_JDBC_USERNAME") + require.Equal(t, string(secretData["DB_PASSWORD"]), "YOUR_JDBC_PASSWORD") +} + +// VerfiyRegistrySecret - Verifies the registry secret deployed with the values as provided in default values.yaml +func VerfiyRegistrySecret(t *testing.T, helmChartPath string, options *helm.Options) { + + registrySecret := helm.RenderTemplate(t, options, 
helmChartPath, []string{"templates/pega-registry-secret.yaml"}) + var registrySecretObj k8score.Secret + helm.UnmarshalK8SYaml(t, registrySecret, ®istrySecretObj) + reqgistrySecretData := registrySecretObj.Data + require.Contains(t, string(reqgistrySecretData[".dockerconfigjson"]), "YOUR_DOCKER_REGISTRY") + require.Contains(t, string(reqgistrySecretData[".dockerconfigjson"]), "WU9VUl9ET0NLRVJfUkVHSVNUUllfVVNFUk5BTUU6WU9VUl9ET0NLRVJfUkVHSVNUUllfUEFTU1dPUkQ=") +} + +// compareConfigMapData - Compares the config map deployed for each kind of tier with the excepted xml's +func compareConfigMapData(t *testing.T, actualFileData string, expectedFileName string) { + expectedFile, err := ioutil.ReadFile(expectedFileName) + require.Empty(t, err) + expectedFileData := string(expectedFile) + expectedFileData = strings.Replace(expectedFileData, "\r", "", -1) + + equal := false + if expectedFileData == actualFileData { + equal = true + } + require.Equal(t, true, equal) +} + +//aksSpecificUpgraderDeployEnvs - Test aks specific upgrade-deploy environmnet variables in case of upgrade-deploy +func aksSpecificUpgraderDeployEnvs(t *testing.T, options *helm.Options, container k8score.Container) { + if options.SetValues["global.provider"] == "aks" && options.SetValues["global.actions.execute"] == "upgrade-deploy" { + require.Equal(t, container.Env[0].Name, "KUBERNETES_SERVICE_HOST") + require.Equal(t, container.Env[0].Value, "API_SERVICE_ADDRESS") + require.Equal(t, container.Env[1].Name, "KUBERNETES_SERVICE_PORT_HTTPS") + require.Equal(t, container.Env[1].Value, "SERVICE_PORT_HTTPS") + require.Equal(t, container.Env[2].Name, "KUBERNETES_SERVICE_PORT") + require.Equal(t, container.Env[2].Value, "SERVICE_PORT_HTTPS") + } +} + +// VerifyInitContinerData - Verifies any possible initContainer that can occur in pega helm chart deployments +func VerifyInitContinerData(t *testing.T, containers []k8score.Container, options *helm.Options) { + + if len(containers) == 0 { + println("no init 
containers") + } + + for i := 0; i < len(containers); i++ { + container := containers[i] + name := container.Name + if name == "wait-for-pegainstall" { + require.Equal(t, "dcasavant/k8s-wait-for", container.Image) + require.Equal(t, []string{"job", "pega-db-install"}, container.Args) + } else if name == "wait-for-pegasearch" { + require.Equal(t, "busybox:1.31.0", container.Image) + require.Equal(t, []string{"sh", "-c", "until $(wget -q -S --spider --timeout=2 -O /dev/null http://pega-search); do echo Waiting for search to become live...; sleep 10; done;"}, container.Command) + } else if name == "wait-for-cassandra" { + require.Equal(t, "cassandra:3.11.3", container.Image) + require.Equal(t, []string{"sh", "-c", "until cqlsh -u \"dnode_ext\" -p \"dnode_ext\" -e \"describe cluster\" release-name-cassandra 9042 ; do echo Waiting for cassandra to become live...; sleep 10; done;"}, container.Command) + } else if name == "wait-for-cassandra" { + require.Equal(t, "cassandra:3.11.3", container.Image) + require.Equal(t, []string{"sh", "-c", "until cqlsh -u \"dnode_ext\" -p \"dnode_ext\" -e \"describe cluster\" release-name-cassandra 9042 ; do echo Waiting for cassandra to become live...; sleep 10; done;"}, container.Command) + } else if name == "wait-for-pegaupgrade" { + require.Equal(t, "dcasavant/k8s-wait-for", container.Image) + require.Equal(t, []string{"job", "pega-db-upgrade"}, container.Args) + aksSpecificUpgraderDeployEnvs(t, options, container) + } else if name == "wait-for-pre-dbupgrade" { + require.Equal(t, "dcasavant/k8s-wait-for", container.Image) + require.Equal(t, []string{"job", "pega-pre-upgrade"}, container.Args) + } else if name == "wait-for-rolling-updates" { + require.Equal(t, "dcasavant/k8s-wait-for", container.Image) + require.Equal(t, []string{"sh", "-c", " kubectl rollout status deployment/pega-web --namespace default && kubectl rollout status deployment/pega-batch --namespace default && kubectl rollout status statefulset/pega-stream --namespace 
default"}, container.Command) + } else { + fmt.Println("invalid init containers found.. please check the list", name) + t.Fail() + } + } +} diff --git a/terratest/src/test/data/expectedInstallDeployContext.xml b/terratest/src/test/data/expectedInstallDeployContext.xml new file mode 100644 index 000000000..c29f9816f --- /dev/null +++ b/terratest/src/test/data/expectedInstallDeployContext.xml @@ -0,0 +1,31 @@ + + + + WEB-INF/web.xml + + + + + + + + {{ if .Env.CUSTOMERDATA_SCHEMA }} + + {{ else }} + + {{ end }} + + \ No newline at end of file diff --git a/terratest/src/test/data/expectedInstallDeployPRlog4j2.xml b/terratest/src/test/data/expectedInstallDeployPRlog4j2.xml new file mode 100644 index 000000000..0d0867934 --- /dev/null +++ b/terratest/src/test/data/expectedInstallDeployPRlog4j2.xml @@ -0,0 +1,119 @@ + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{tenantid}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + %d [%20.20t] [%20.20X{tenantid}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + %m%n + + + + + + + + + + %d [%20.20t] [%20.20X{tenantid}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + %d (%30.30c{3}) %-5p - %m%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/terratest/src/test/data/expectedInstallDeployPrconfig.xml b/terratest/src/test/data/expectedInstallDeployPrconfig.xml new file mode 100644 index 000000000..2175a6f83 --- /dev/null +++ b/terratest/src/test/data/expectedInstallDeployPrconfig.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/terratest/src/test/data/expectedMigrateSystem.properties.tmpl b/terratest/src/test/data/expectedMigrateSystem.properties.tmpl new file mode 100644 index 000000000..810e9076c --- /dev/null +++ 
b/terratest/src/test/data/expectedMigrateSystem.properties.tmpl @@ -0,0 +1,82 @@ +# Properties File for use with migrateSystem.xml Update this file +# before using migrate.bat/sh script. +# Set the DB connection + +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## + +#The system where the tables/rules will be migrated from +pega.source.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.source.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.source.database.type={{ .Env.DB_TYPE }} +pega.source.jdbc.url={{ .Env.JDBC_URL }} +pega.source.jdbc.username={{ .Env.DB_USERNAME }} +pega.source.jdbc.password={{ .Env.DB_PASSWORD }} +#Custom connection properties +pega.source.jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +pega.source.rules.schema={{ .Env.RULES_SCHEMA }} +#Set the following property if the source system already contains a split schema. +pega.source.data.schema={{ .Env.DATA_SCHEMA }} +# Used for systems with a separate Customer Data Schema +# The value of pega.source.data is the default value for pega.source.customerdata.schema +pega.source.customerdata.schema={{ .Env.CUSTOMERDATA_SCHEMA }} + +#The system where the tables/rules will be migrated to +pega.target.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.target.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.target.database.type={{ .Env.DB_TYPE }} +pega.target.jdbc.url={{ .Env.JDBC_URL }} +pega.target.jdbc.username={{ .Env.DB_USERNAME }} +pega.target.jdbc.password={{ .Env.DB_PASSWORD }} +#Custom connection properties +pega.target.jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +pega.target.rules.schema={{ .Env.TARGET_RULES_SCHEMA }} +#Used to correctly schema qualify tables in stored procedures, views and triggers. +#This property is not required if migrating before performing an upgrade. 
+pega.target.data.schema={{ .Env.TARGET_DATA_SCHEMA }} +# Used for systems with a separate Customer Data Schema +# The value of pega.target.data is the default value for pega.target.customerdata.schema +pega.target.customerdata.schema={{ .Env.TARGET_CUSTOMERDATA_SCHEMA }} + +#Set this property to bypass udf generation on the target system. +pega.target.bypass.udf={{ .Env.BYPASS_UDF_GENERATION }} + +#The location of the db2zos site specific properties file. Only used if the target system is a db2zos database. +pega.target.zos.properties=config/db2zos/DB2SiteDependent.properties + +#The commit count to use when loading database tables +db.load.commit.rate={{ .Env.MIGRATION_DB_LOAD_COMMIT_RATE }} + +################### Migrate System Properties ########################################### +#The directory where output from the bulk mover will be stored. This directory will be cleared when pega.bulkmover.unload.db is run. +#This property must be set if either pega.bulkmover.unload.db or pega.bulkmover.load.db is set to true. +pega.bulkmover.directory=/opt/pega/kit/scripts/upgrade/mover + +#The location where a temporary directory will be created for use by the migrate system utilities. +pega.migrate.temp.directory=/opt/pega/kit/scripts/upgrade/migrate + + +######## The operations to be run by the utility, they will only be run if the property is set to true. +#Set to true if migrating before an upgrade. If true admin table(s) required +#for an upgrade will be migrated with the rules tables. +pega.move.admin.table={{ .Env.MOVE_ADMIN_TABLE }} +#Generate an xml document containing the definitions of tables in the source system. It will be found in the schema directory of the +#distribution image. +pega.clone.generate.xml={{ .Env.CLONE_GENERATE_XML }} +#Create ddl from the generated xml document. This ddl can be used to create copies of rule tables found on the source system. 
+pega.clone.create.ddl={{ .Env.CLONE_CREATE_DDL }} +#Apply the generated clone ddl to the target system. +pega.clone.apply.ddl={{ .Env.CLONE_APPLY_DDL }} +#Unload the rows from the rules tables on the source system into the pega.bulkmover.directory. +pega.bulkmover.unload.db={{ .Env.BULKMOVER_UNLOAD_DB }} +#Load the rows onto the target system from the pega.bulkmover.directory. +pega.bulkmover.load.db={{ .Env.BULKMOVER_LOAD_DB }} + +### The following operations should only be run when migrating upgraded rules +#Generate the rules schema objects (views, triggers, procedures, functions). The objects will be created in the pega.target.rules.schema +#but will contain references to the pega.target.data.schema where appropriate. +pega.rules.objects.generate={{ .Env.RULES_OBJECTS_GENERATE }} +#Apply the rules schema objects (views, triggers, procedures, functions) to pega.target.rules.schema. +pega.rules.objects.apply={{ .Env.RULES_OBJECTS_APPLY }} \ No newline at end of file diff --git a/terratest/src/test/data/expectedPRPCUtils.properties.tmpl b/terratest/src/test/data/expectedPRPCUtils.properties.tmpl new file mode 100644 index 000000000..e05b8c857 --- /dev/null +++ b/terratest/src/test/data/expectedPRPCUtils.properties.tmpl @@ -0,0 +1,32 @@ +# Properties file for use with PRPC Utilities. 
+ +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## +# CONNECTION INFORMATION +pega.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.database.type={{ .Env.DB_TYPE }} +pega.jdbc.url={{ .Env.JDBC_URL }} +pega.jdbc.username={{ .Env.DB_USERNAME }} +pega.jdbc.password={{ .Env.DB_PASSWORD }} + +# CUSTOM CONNECTION PROPERTIES +jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +# RULES SCHEMA NAME +rules.schema.name={{ .Env.RULES_SCHEMA }} + +# DATA SCHEMA NAME +data.schema.name={{ .Env.DATA_SCHEMA }} + +# CUSTOMER DATA SCHEMA NAME +customerdata.schema.name={{ .Env.CUSTOMERDATA_SCHEMA }} + +# USER TEMP DIRECTORY +# Will use default if not set to valid directory +user.temp.dir=/opt/pega/temp + +############################### SETTINGS FOR CHANGING DYNAMIC SYSTEM SETTINGS ######## +###################################################################################### +dass.filepath=/opt/pega/kit/scripts/upgrade_dass_settings.json +pega.codeset.version={{ .Env.ENGINE_CODESET_VERSION }} \ No newline at end of file diff --git a/terratest/src/test/data/expectedPRbootstrap.properties b/terratest/src/test/data/expectedPRbootstrap.properties new file mode 100644 index 000000000..fd6194b85 --- /dev/null +++ b/terratest/src/test/data/expectedPRbootstrap.properties @@ -0,0 +1,19 @@ +install.{{ .Env.DB_TYPE }}.schema={{ .Env.DATA_SCHEMA }} +initialization.settingsource=file +com.pega.pegarules.priv.LogHelper.USE_LOG4JV2=true +maxIdle={{ .Env.MAX_IDLE }} +com.pega.pegarules.bootstrap.engineclasses.tablename={{ .Env.RULES_SCHEMA }}.pr_engineclasses +install.{{ .Env.DB_TYPE }}.rulesSchema={{ .Env.RULES_SCHEMA }} +maxWait={{ .Env.MAX_WAIT }} +install.{{ .Env.DB_TYPE }}.url={{ .Env.JDBC_URL }} +maxActive={{ .Env.MAX_ACTIVE }} +install.{{ .Env.DB_TYPE }}.username={{ .Env.DB_USERNAME }} +{{ .Env.DB_TYPE }}.jdbc.class={{ 
.Env.JDBC_CLASS }} +com.pega.pegarules.bootstrap.assembledclasses.tablename={{ .Env.RULES_SCHEMA }}.pr_assembledclasses +com.pega.pegarules.bootstrap.assembledclasses.dbcpsource=install.{{ .Env.DB_TYPE }} +com.pega.pegarules.bootstrap.tempdir=/opt/pega/temp +poolPreparedStatements=true +install.{{ .Env.DB_TYPE }}.connectionProperties={{ .Env.JDBC_CUSTOM_CONNECTION }} +install.{{ .Env.DB_TYPE }}.password={{ .Env.DB_PASSWORD }} +com.pega.pegarules.bootstrap.codeset.version.Pega-EngineCode={{ .Env.CODESET_VERSION }} +com.pega.pegarules.bootstrap.engineclasses.dbcpsource=install.{{ .Env.DB_TYPE }} \ No newline at end of file diff --git a/terratest/src/test/data/expectedPRlog4j2.xml b/terratest/src/test/data/expectedPRlog4j2.xml new file mode 100644 index 000000000..35a2933a8 --- /dev/null +++ b/terratest/src/test/data/expectedPRlog4j2.xml @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{tenantid}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + + %m%n + + + + + + + + + + + + + + %8r [%t] %-5p %c - %m%n + + + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + %d [%20.20t] [%10.10X{pegathread}] [%20.20X{app}] (%30.30c{3}) %-5p %X{stack} %X{userid} - %m%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/terratest/src/test/data/expectedPrconfig.xml b/terratest/src/test/data/expectedPrconfig.xml new file mode 100644 index 000000000..8373c9146 --- /dev/null +++ b/terratest/src/test/data/expectedPrconfig.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/terratest/src/test/data/expectedSetupdatabase.properties b/terratest/src/test/data/expectedSetupdatabase.properties new file mode 100644 
index 000000000..36f0db94d --- /dev/null +++ b/terratest/src/test/data/expectedSetupdatabase.properties @@ -0,0 +1,74 @@ +# Properties file for use with Pega Deployment Utilities. +# For more information, see the Pega Platform help. + +################### COMMON PROPERTIES - DB CONNECTION ################## +######################################################################## + +# CONNECTION INFORMATION +pega.jdbc.driver.jar={{ .Env.DRIVER_JAR_PATH }} +pega.jdbc.driver.class={{ .Env.JDBC_CLASS }} +pega.database.type={{ .Env.DB_TYPE }} +pega.jdbc.url={{ .Env.JDBC_URL }} +pega.jdbc.username={{ .Env.DB_USERNAME }} +pega.jdbc.password={{ .Env.DB_PASSWORD }} + +pega.admin.password={{ .Env.ADMIN_PASSWORD }} + +jdbc.custom.connection.properties={{ .Env.JDBC_CUSTOM_CONNECTION }} + +# RULES SCHEMA NAME +rules.schema.name={{ .Env.RULES_SCHEMA }} + +# DATA SCHEMA NAME +data.schema.name={{ .Env.DATA_SCHEMA }} + +# CUSTOMER DATA SCHEMA NAME +customerdata.schema.name={{ .Env.CUSTOMERDATA_SCHEMA }} + +# USER TEMP DIRECTORY +# Will use default if not set to valid directory +user.temp.dir=/opt/pega/temp + +# z/OS SITE-SPECIFIC PROPERTIES FILE +pega.zos.properties={{ .Env.ZOS_PROPERTIES }} + +# BYPASS UDF GENERATION? +bypass.udf.generation={{ .Env.BYPASS_UDF_GENERATION }} + +# BYPASS AUTOMATICALLY TRUNCATING PR_SYS_UPDATESCACHE? +bypass.truncate.updatescache={{ .Env.BYPASS_TRUNCATE_UPDATESCACHE }} + +# REBUILD DATABASE RULES INDEXES +rebuild.indexes={{ .Env.REBUILD_INDEXES }} + +# SYSTEM NAME +system.name={{ .Env.SYSTEM_NAME }} + +# PRODUCTION LEVEL +production.level={{ .Env.PRODUCTION_LEVEL }} + +# MULTITENANT SYSTEM? 
+# A multitenant system allows organizations to act as separate Pega Platform installations +multitenant.system={{ .Env.MT_SYSTEM }} + +# UPDATE EXISTING APPLICATIONS +update.existing.applications={{ .Env.UPDATE_EXISTING_APPLICATIONS }} + +# UPDATE APPLICATIONS SCHEMA +update.applications.schema={{ .Env.UPDATE_APPLICATIONS_SCHEMA }} + +# WORKLOAD MANAGER +db2zos.udf.wlm={{ .Env.DB2_ZOS_UDF_WLM }} + +# RUN RULESET CLEANUP? +run.ruleset.cleanup={{ .Env.RUN_RULESET_CLEANUP }} + +# CUSTOM CONFIGURATION PROPERTIES FILE +# The congfiguration files are dockerized using .tmpl files and are stored in opt/pega/config +# inside the container. +pegarules.config=/opt/pega/kit/scripts/prconfig.xml +prbootstrap.config=/opt/pega/kit/scripts/prbootstrap.properties +prlogging.config=/opt/pega/kit/scripts/prlog4j2.xml + +# Create schema if absent flag - Only from Docker related deployments +pega.schema.autocreate=true \ No newline at end of file diff --git a/terratest/src/test/deploy_test.go b/terratest/src/test/deploy_test.go new file mode 100644 index 000000000..30cb7ca95 --- /dev/null +++ b/terratest/src/test/deploy_test.go @@ -0,0 +1,30 @@ +package test + +import ( + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" +) + +const PegaHelmChartPath = "../../../charts/pega" + +// set action execute to install +var options = &helm.Options{ + SetValues: map[string]string{ + "global.provider": "k8s", + "global.actions.execute": "deploy", + }, +} + +// TestPegaStandardTierDeployment - Test case to verify the standard pega tier deployment. 
+// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func TestPegaStandardTierDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, options, []string{"wait-for-pegasearch", "wait-for-cassandra"}) +} diff --git a/terratest/src/test/deployment_utility.go b/terratest/src/test/deployment_utility.go new file mode 100644 index 000000000..9af683fce --- /dev/null +++ b/terratest/src/test/deployment_utility.go @@ -0,0 +1,374 @@ +package test + +import ( + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + k8score "k8s.io/api/core/v1" + k8sv1beta1 "k8s.io/api/extensions/v1beta1" + intstr "k8s.io/apimachinery/pkg/util/intstr" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + api "k8s.io/kubernetes/pkg/apis/core" +) + +var replicas int32 = 1 +var replicasPtr = &replicas +var ProgressDeadlineSeconds int32 = 2147483647 +var ProgressDeadlineSecondsPtr = &ProgressDeadlineSeconds +var rollingUpdate intstr.IntOrString = intstr.FromString("25%") +var rollingUpdatePtr = &rollingUpdate +var terminationGracePeriodSeconds int64 = 300 +var terminationGracePeriodSecondsPtr = &terminationGracePeriodSeconds + +type pegaDeployment struct { + name string + initContainers []string + nodeType string +} + +// VerifyPegaStandardTierDeployment - Verifies Pega standard tier deployment for values as provided in default values.yaml. 
+// It ensures syntax of web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func VerifyPegaStandardTierDeployment(t *testing.T, helmChartPath string, options *helm.Options, initContainers []string) { + + // Verify Deployment objects + SplitAndVerifyPegaDeployments(t, helmChartPath, options, initContainers) + + // Verify tier config + VerifyTierConfg(t, helmChartPath, options) + + // Verify environment config + VerifyEnvironmentConfig(t, helmChartPath, options) + + // Verify search service + VerifySearchService(t, helmChartPath, options) + + // Verfiy Pega deployed services + SplitAndVerifyPegaServices(t, helmChartPath, options) + + if options.SetValues["global.provider"] != "openshift" { + // Verify pega deployed ingresses + SplitAndVerifyPegaIngresses(t, helmChartPath, options) + } + // Verify Pega HPAObjects + SplitAndVerifyPegaHPAs(t, helmChartPath, options) + + // Verify search transport service + VerifySearchTransportService(t, helmChartPath, options) + +} + +// SplitAndVerifyPegaDeployments - Splits the deployments from the rendered template and asserts each deployment/statefulset objects +func SplitAndVerifyPegaDeployments(t *testing.T, helmChartPath string, options *helm.Options, initContainers []string) { + deployment := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-deployment.yaml"}) + var deploymentObj appsv1.Deployment + var statefulsetObj appsv1beta2.StatefulSet + deploymentSlice := strings.Split(deployment, "---") + for index, deploymentInfo := range deploymentSlice { + if index >= 1 && index <= 3 { + + if index == 1 { + helm.UnmarshalK8SYaml(t, deploymentInfo, &deploymentObj) + VerifyPegaDeployment(t, &deploymentObj, pegaDeployment{"pega-web", initContainers, "WebUser"}, options) + } else if index == 2 { + helm.UnmarshalK8SYaml(t, deploymentInfo, &deploymentObj) + VerifyPegaDeployment(t, &deploymentObj, pegaDeployment{"pega-batch", 
initContainers, "BackgroundProcessing,Search,Batch,RealTime,Custom1,Custom2,Custom3,Custom4,Custom5,BIX"}, options) + } else if index == 3 { + helm.UnmarshalK8SYaml(t, deploymentInfo, &statefulsetObj) + VerifyPegaStatefulSet(t, &statefulsetObj, pegaDeployment{"pega-stream", initContainers, "Stream"}, options) + + } + } + } +} + +// VerifyDeployment - Performs common pega deployment/statefulset assertions with the values as provided in default values.yaml +func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec pegaDeployment, options *helm.Options) { + require.Equal(t, pod.Volumes[0].Name, "pega-volume-config") + require.Equal(t, expectedSpec.name, pod.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name) + require.Equal(t, pod.Volumes[0].VolumeSource.ConfigMap.DefaultMode, volumeDefaultModePtr) + require.Equal(t, pod.Volumes[1].Name, "pega-volume-credentials") + require.Equal(t, pod.Volumes[1].VolumeSource.Secret.SecretName, "pega-credentials-secret") + require.Equal(t, pod.Volumes[1].VolumeSource.Secret.DefaultMode, volumeDefaultModePtr) + + actualInitContainers := pod.InitContainers + count := len(actualInitContainers) + actualInitContainerNames := make([]string, count) + for i := 0; i < count; i++ { + actualInitContainerNames[i] = actualInitContainers[i].Name + } + + require.Equal(t, expectedSpec.initContainers, actualInitContainerNames) + VerifyInitContinerData(t, actualInitContainers, options) + require.Equal(t, pod.Containers[0].Name, "pega-web-tomcat") + require.Equal(t, pod.Containers[0].Image, "pegasystems/pega") + require.Equal(t, pod.Containers[0].Ports[0].Name, "pega-web-port") + require.Equal(t, pod.Containers[0].Ports[0].ContainerPort, int32(8080)) + require.Equal(t, pod.Containers[0].Env[0].Name, "NODE_TYPE") + require.Equal(t, expectedSpec.nodeType, pod.Containers[0].Env[0].Value) + require.Equal(t, pod.Containers[0].Env[1].Name, "JAVA_OPTS") + require.Equal(t, pod.Containers[0].Env[1].Value, "") + require.Equal(t, 
pod.Containers[0].Env[2].Name, "INITIAL_HEAP") + require.Equal(t, pod.Containers[0].Env[2].Value, "4096m") + require.Equal(t, pod.Containers[0].Env[3].Name, "MAX_HEAP") + require.Equal(t, pod.Containers[0].Env[3].Value, "7168m") + require.Equal(t, pod.Containers[0].EnvFrom[0].ConfigMapRef.LocalObjectReference.Name, "pega-environment-config") + + require.Equal(t, "2", pod.Containers[0].Resources.Limits.Cpu().String()) + require.Equal(t, "8Gi", pod.Containers[0].Resources.Limits.Memory().String()) + require.Equal(t, "200m", pod.Containers[0].Resources.Requests.Cpu().String()) + require.Equal(t, "6Gi", pod.Containers[0].Resources.Requests.Memory().String()) + + require.Equal(t, pod.Containers[0].VolumeMounts[0].Name, "pega-volume-config") + require.Equal(t, pod.Containers[0].VolumeMounts[0].MountPath, "/opt/pega/config") + + require.Equal(t, pod.Containers[0].LivenessProbe.InitialDelaySeconds, int32(300)) + require.Equal(t, pod.Containers[0].LivenessProbe.TimeoutSeconds, int32(20)) + require.Equal(t, pod.Containers[0].LivenessProbe.PeriodSeconds, int32(10)) + require.Equal(t, pod.Containers[0].LivenessProbe.SuccessThreshold, int32(1)) + require.Equal(t, pod.Containers[0].LivenessProbe.FailureThreshold, int32(3)) + require.Equal(t, pod.Containers[0].LivenessProbe.HTTPGet.Path, "/prweb/PRRestService/monitor/pingService/ping") + require.Equal(t, pod.Containers[0].LivenessProbe.HTTPGet.Port, intstr.FromInt(8080)) + require.Equal(t, pod.Containers[0].LivenessProbe.HTTPGet.Scheme, k8score.URIScheme("HTTP")) + + require.Equal(t, pod.Containers[0].ReadinessProbe.InitialDelaySeconds, int32(300)) + require.Equal(t, pod.Containers[0].ReadinessProbe.TimeoutSeconds, int32(20)) + require.Equal(t, pod.Containers[0].ReadinessProbe.PeriodSeconds, int32(10)) + require.Equal(t, pod.Containers[0].ReadinessProbe.SuccessThreshold, int32(1)) + require.Equal(t, pod.Containers[0].ReadinessProbe.FailureThreshold, int32(3)) + require.Equal(t, pod.Containers[0].ReadinessProbe.HTTPGet.Path, 
"/prweb/PRRestService/monitor/pingService/ping") + require.Equal(t, pod.Containers[0].ReadinessProbe.HTTPGet.Port, intstr.FromInt(8080)) + require.Equal(t, pod.Containers[0].ReadinessProbe.HTTPGet.Scheme, k8score.URIScheme("HTTP")) + + require.Equal(t, pod.ImagePullSecrets[0].Name, "pega-registry-secret") + require.Equal(t, pod.RestartPolicy, k8score.RestartPolicy("Always")) + require.Equal(t, pod.TerminationGracePeriodSeconds, terminationGracePeriodSecondsPtr) + require.Equal(t, pod.Containers[0].VolumeMounts[0].Name, "pega-volume-config") + require.Equal(t, pod.Containers[0].VolumeMounts[0].MountPath, "/opt/pega/config") + require.Equal(t, pod.Volumes[0].Name, "pega-volume-config") + require.Equal(t, pod.Volumes[1].Name, "pega-volume-credentials") + require.Equal(t, pod.Volumes[1].Secret.SecretName, "pega-credentials-secret") + +} + +// VerifyPegaDeployment - Performs specific Pega deployment assertions with the values as provided in default values.yaml +func VerifyPegaDeployment(t *testing.T, deploymentObj *appsv1.Deployment, expectedDeployment pegaDeployment, options *helm.Options) { + require.Equal(t, deploymentObj.Spec.Replicas, replicasPtr) + require.Equal(t, deploymentObj.Spec.ProgressDeadlineSeconds, ProgressDeadlineSecondsPtr) + require.Equal(t, expectedDeployment.name, deploymentObj.Spec.Selector.MatchLabels["app"]) + require.Equal(t, deploymentObj.Spec.Strategy.RollingUpdate.MaxSurge, rollingUpdatePtr) + require.Equal(t, deploymentObj.Spec.Strategy.RollingUpdate.MaxUnavailable, rollingUpdatePtr) + require.Equal(t, deploymentObj.Spec.Strategy.Type, appsv1.DeploymentStrategyType("RollingUpdate")) + require.Equal(t, expectedDeployment.name, deploymentObj.Spec.Template.Labels["app"]) + require.NotEmpty(t, deploymentObj.Spec.Template.Annotations["config-check"]) + deploymentSpec := deploymentObj.Spec.Template.Spec + VerifyDeployment(t, &deploymentSpec, expectedDeployment, options) +} + +// VerifyPegaStatefulSet - Performs specific Pega statefulset assertions 
with the values as provided in default values.yaml +func VerifyPegaStatefulSet(t *testing.T, statefulsetObj *appsv1beta2.StatefulSet, expectedStatefulset pegaDeployment, options *helm.Options) { + require.Equal(t, statefulsetObj.Spec.VolumeClaimTemplates[0].Name, "pega-stream") + require.Equal(t, statefulsetObj.Spec.VolumeClaimTemplates[0].Spec.AccessModes[0], k8score.PersistentVolumeAccessMode("ReadWriteOnce")) + require.Equal(t, statefulsetObj.Spec.ServiceName, "pega-stream") + statefulsetSpec := statefulsetObj.Spec.Template.Spec + require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].Name, "pega-stream") + require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].MountPath, "/opt/pega/streamvol") + require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[2].Name, "pega-volume-credentials") + require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[2].MountPath, "/opt/pega/secrets") + VerifyDeployment(t, &statefulsetSpec, expectedStatefulset, options) +} + +type pegaServices struct { + Name string + Port int32 + TargetPort intstr.IntOrString +} + +// SplitAndVerifyPegaServices - Splits the services from the rendered template and asserts each service objects +func SplitAndVerifyPegaServices(t *testing.T, helmChartPath string, options *helm.Options) { + service := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-service.yaml"}) + var pegaServiceObj k8score.Service + serviceSlice := strings.Split(service, "---") + for index, serviceInfo := range serviceSlice { + if index >= 1 && index <= 2 { + helm.UnmarshalK8SYaml(t, serviceInfo, &pegaServiceObj) + if index == 1 { + VerifyPegaService(t, &pegaServiceObj, pegaServices{"pega-web", int32(80), intstr.IntOrString{IntVal: 8080}}, options) + } else { + VerifyPegaService(t, &pegaServiceObj, pegaServices{"pega-stream", int32(7003), intstr.IntOrString{IntVal: 7003}}, options) + } + } + } +} + +// VerifyPegaService - Performs Pega Service assertions with the values as provided in 
default values.yaml +func VerifyPegaService(t *testing.T, serviceObj *k8score.Service, expectedService pegaServices, options *helm.Options) { + provider := options.SetValues["global.provider"] + if !(provider == "openshift" || provider == "eks") { + require.Equal(t, serviceObj.Annotations["traefik.ingress.kubernetes.io/affinity"], "true") + require.Equal(t, serviceObj.Annotations["traefik.ingress.kubernetes.io/load-balancer-method"], "drr") + require.Equal(t, serviceObj.Annotations["traefik.ingress.kubernetes.io/max-conn-amount"], "10") + require.Equal(t, serviceObj.Annotations["traefik.ingress.kubernetes.io/session-cookie-name"], "UNIQUE-PEGA-COOKIE-NAME") + } + require.Equal(t, serviceObj.Spec.Type, k8score.ServiceType("NodePort")) + require.Equal(t, serviceObj.Spec.Selector["app"], expectedService.Name) + require.Equal(t, serviceObj.Spec.Ports[0].Port, expectedService.Port) + require.Equal(t, serviceObj.Spec.Ports[0].TargetPort, expectedService.TargetPort) +} + +type pegaIngress struct { + Name string + Port intstr.IntOrString +} + +// VerifyPegaIngresses - Splits the ingresses from the rendered template and asserts each ingress object +func SplitAndVerifyPegaIngresses(t *testing.T, helmChartPath string, options *helm.Options) { + ingress := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-ingress.yaml"}) + var pegaIngressObj k8sv1beta1.Ingress + ingressSlice := strings.Split(ingress, "---") + for index, ingressInfo := range ingressSlice { + if index >= 1 && index <= 2 { + helm.UnmarshalK8SYaml(t, ingressInfo, &pegaIngressObj) + if index == 1 { + VerifyPegaIngress(t, &pegaIngressObj, pegaIngress{"pega-web", intstr.IntOrString{IntVal: 80}}, options) + } else { + VerifyPegaIngress(t, &pegaIngressObj, pegaIngress{"pega-stream", intstr.IntOrString{IntVal: 7003}}, options) + } + + } + } +} + +func VerifyPegaIngress(t *testing.T, ingressObj *k8sv1beta1.Ingress, expectedIngress pegaIngress, options *helm.Options) { + provider := 
options.SetValues["global.provider"] + if provider == "eks" { + VerifyEKSIngress(t, ingressObj, expectedIngress) + } else { + VerifyK8SIngress(t, ingressObj, expectedIngress) + } +} + +func VerifyEKSIngress(t *testing.T, ingressObj *k8sv1beta1.Ingress, expectedIngress pegaIngress) { + require.Equal(t, "alb", ingressObj.Annotations["kubernetes.io/ingress.class"]) + require.Equal(t, "[{\"HTTP\": 80}, {\"HTTPS\": 443}]", ingressObj.Annotations["alb.ingress.kubernetes.io/listen-ports"]) + require.Equal(t, "{\"Type\": \"redirect\", \"RedirectConfig\": { \"Protocol\": \"HTTPS\", \"Port\": \"443\", \"StatusCode\": \"HTTP_301\"}}", ingressObj.Annotations["alb.ingress.kubernetes.io/actions.ssl-redirect"]) + require.Equal(t, "internet-facing", ingressObj.Annotations["alb.ingress.kubernetes.io/scheme"]) + require.Equal(t, "stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=3660", ingressObj.Annotations["alb.ingress.kubernetes.io/target-group-attributes"]) + require.Equal(t, "ip", ingressObj.Annotations["alb.ingress.kubernetes.io/target-type"]) + require.Equal(t, "ssl-redirect", ingressObj.Spec.Rules[0].HTTP.Paths[0].Backend.ServiceName) + require.Equal(t, "use-annotation", ingressObj.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort.StrVal) + require.Equal(t, expectedIngress.Name, ingressObj.Spec.Rules[1].HTTP.Paths[0].Backend.ServiceName) + require.Equal(t, expectedIngress.Port, ingressObj.Spec.Rules[1].HTTP.Paths[0].Backend.ServicePort) +} + +// VerifyPegaIngress - Performs Pega Ingress assertions with the values as provided in default values.yaml +func VerifyK8SIngress(t *testing.T, ingressObj *k8sv1beta1.Ingress, expectedIngress pegaIngress) { + require.Equal(t, ingressObj.Annotations["kubernetes.io/ingress.class"], "traefik") + require.Equal(t, expectedIngress.Name, ingressObj.Spec.Rules[0].HTTP.Paths[0].Backend.ServiceName) + require.Equal(t, expectedIngress.Port, ingressObj.Spec.Rules[0].HTTP.Paths[0].Backend.ServicePort) +} + +// VerifySearchService - 
Verifies search service deployment used by search pod with the values as provided in default values.yaml +func VerifySearchService(t *testing.T, helmChartPath string, options *helm.Options) { + + searchService := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/pegasearch/templates/pega-search-service.yaml"}) + var searchServiceObj k8score.Service + helm.UnmarshalK8SYaml(t, searchService, &searchServiceObj) + require.Equal(t, searchServiceObj.Spec.Selector["component"], "Search") + require.Equal(t, searchServiceObj.Spec.Selector["app"], "pega-search") + require.Equal(t, searchServiceObj.Spec.Ports[0].Name, "http") + require.Equal(t, searchServiceObj.Spec.Ports[0].Port, int32(80)) + require.Equal(t, searchServiceObj.Spec.Ports[0].TargetPort, intstr.FromInt(9200)) +} + +// VerifyEnvironmentConfig - Verifies the environment configuration used by the pods with the values as provided in default values.yaml +func VerifyEnvironmentConfig(t *testing.T, helmChartPath string, options *helm.Options) { + + envConfig := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-environment-config.yaml"}) + var envConfigMap k8score.ConfigMap + helm.UnmarshalK8SYaml(t, envConfig, &envConfigMap) + envConfigData := envConfigMap.Data + require.Equal(t, envConfigData["DB_TYPE"], "YOUR_DATABASE_TYPE") + require.Equal(t, envConfigData["JDBC_URL"], "YOUR_JDBC_URL") + require.Equal(t, envConfigData["JDBC_CLASS"], "YOUR_JDBC_DRIVER_CLASS") + require.Equal(t, envConfigData["JDBC_DRIVER_URI"], "YOUR_JDBC_DRIVER_URI") + if options.SetValues["global.actions.execute"] == "upgrade-deploy" { + require.Equal(t, envConfigData["RULES_SCHEMA"], "") + } else { + require.Equal(t, envConfigData["RULES_SCHEMA"], "YOUR_RULES_SCHEMA") + } + require.Equal(t, envConfigData["DATA_SCHEMA"], "YOUR_DATA_SCHEMA") + require.Equal(t, envConfigData["CUSTOMERDATA_SCHEMA"], "") + require.Equal(t, envConfigData["JDBC_CONNECTION_PROPERTIES"], "") + require.Equal(t, 
envConfigData["PEGA_SEARCH_URL"], "http://pega-search") + require.Equal(t, envConfigData["CASSANDRA_CLUSTER"], "true") + require.Equal(t, envConfigData["CASSANDRA_NODES"], "release-name-cassandra") + require.Equal(t, envConfigData["CASSANDRA_PORT"], "9042") +} + +// VerifyTierConfg - Performs the tier specific configuration assertions with the values as provided in default values.yaml +func VerifyTierConfg(t *testing.T, helmChartPath string, options *helm.Options) { + config := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-config.yaml"}) + var pegaConfigMap k8score.ConfigMap + configSlice := strings.Split(config, "---") + for index, configData := range configSlice { + if index >= 1 && index <= 3 { + helm.UnmarshalK8SYaml(t, configData, &pegaConfigMap) + pegaConfigMapData := pegaConfigMap.Data + compareConfigMapData(t, pegaConfigMapData["prconfig.xml"], "data/expectedInstallDeployPrconfig.xml") + compareConfigMapData(t, pegaConfigMapData["context.xml.tmpl"], "data/expectedInstallDeployContext.xml") + compareConfigMapData(t, pegaConfigMapData["prlog4j2.xml"], "data/expectedInstallDeployPRlog4j2.xml") + } + } +} + +type hpa struct { + name string + targetRefName string + kind string + apiversion string +} + +// SplitAndVerifyPegaHPAs - Splits the HPA object from the rendered template and asserts each HPA object +func SplitAndVerifyPegaHPAs(t *testing.T, helmChartPath string, options *helm.Options) { + pegaHpa := helm.RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-hpa.yaml"}) + var pegaHpaObj autoscaling.HorizontalPodAutoscaler + hpaSlice := strings.SplitAfter(pegaHpa, "85") + for index, hpaInfo := range hpaSlice { + if index >= 0 && index <= 1 { + helm.UnmarshalK8SYaml(t, hpaInfo, &pegaHpaObj) + if index == 0 { + VerifyPegaHpa(t, &pegaHpaObj, hpa{"pega-web-hpa", "pega-web", "Deployment", "extensions/v1beta1"}) + } else { + VerifyPegaHpa(t, &pegaHpaObj, hpa{"pega-batch-hpa", "pega-batch", "Deployment", 
"extensions/v1beta1"}) + } + } + } +} + +// VerifyPegaHpa - Performs Pega HPA assertions with the values as provided in default values.yaml +func VerifyPegaHpa(t *testing.T, hpaObj *autoscaling.HorizontalPodAutoscaler, expectedHpa hpa) { + require.Equal(t, hpaObj.Spec.ScaleTargetRef.Name, expectedHpa.targetRefName) + require.Equal(t, hpaObj.Spec.ScaleTargetRef.Kind, expectedHpa.kind) + require.Equal(t, hpaObj.Spec.ScaleTargetRef.APIVersion, expectedHpa.apiversion) + require.Equal(t, hpaObj.Spec.Metrics[0].Resource.Name, api.ResourceName("cpu")) + require.Equal(t, hpaObj.Spec.Metrics[1].Resource.Name, api.ResourceName("memory")) + require.Equal(t, hpaObj.Spec.MaxReplicas, int32(5)) +} + +// VerifySearchTransportService - Performs the search transport service assertions deployed with the values as provided in default values.yaml +func VerifySearchTransportService(t *testing.T, helmChartPath string, options *helm.Options) { + transportSearchService := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/pegasearch/templates/pega-search-transport-service.yaml"}) + var transportSearchServiceObj k8score.Service + helm.UnmarshalK8SYaml(t, transportSearchService, &transportSearchServiceObj) + + require.Equal(t, transportSearchServiceObj.Spec.Selector["component"], "Search") + require.Equal(t, transportSearchServiceObj.Spec.Selector["app"], "pega-search") + require.Equal(t, transportSearchServiceObj.Spec.ClusterIP, "None") + require.Equal(t, transportSearchServiceObj.Spec.Ports[0].Name, "transport") + require.Equal(t, transportSearchServiceObj.Spec.Ports[0].Port, int32(80)) + require.Equal(t, transportSearchServiceObj.Spec.Ports[0].TargetPort, intstr.FromInt(9300)) +} diff --git a/terratest/src/test/eks_deploy_test.go b/terratest/src/test/eks_deploy_test.go new file mode 100644 index 000000000..58758142c --- /dev/null +++ b/terratest/src/test/eks_deploy_test.go @@ -0,0 +1,30 @@ +package test + +import ( + "path/filepath" + "testing" + + 
"github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" +) + +const PegaHelmChartPath = "../../../charts/pega" + +// set action execute to deploy +var options = &helm.Options{ + SetValues: map[string]string{ + "global.provider": "eks", + "global.actions.execute": "deploy", + }, +} + +// TestPegaStandardTierDeployment - Test case to verify the standard pega tier deployment. +// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func TestPegaStandardTierDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, options, []string{"wait-for-pegasearch", "wait-for-cassandra"}) +} diff --git a/terratest/src/test/install_deploy_test.go b/terratest/src/test/install_deploy_test.go new file mode 100644 index 000000000..4c07a521a --- /dev/null +++ b/terratest/src/test/install_deploy_test.go @@ -0,0 +1,52 @@ +package test + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + k8sbatch "k8s.io/api/batch/v1" +) + +const pegaHelmChartPath = "../../../charts/pega" + +// Sets the action to install-deploy; all test cases present in this file use this action +var options = &helm.Options{ + SetValues: map[string]string{ + "global.provider": "k8s", + "global.actions.execute": "install-deploy", + }, +} + +// VerifyInstallDeployActionSkippedTemplates - Tests all the skipped templates for action install-deploy. These templates are not supposed to be rendered for install-deploy action. 
+func VerifyInstallDeployActionSkippedTemplates(t *testing.T) { + output := helm.RenderTemplate(t, options, pegaHelmChartPath, []string{ + "templates/pega-action-validate.yaml", + "charts/installer/templates/pega-upgrade-environment-config.yaml", + }) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + // assert that above templates are not rendered + require.Empty(t, deployment) +} + +// VerifyInstallDeployActionInstallerJob - Tests Install job yaml rendered with the values as provided in default values.yaml for action install-deploy +func VerifyInstallDeployActionInstallerJob(t *testing.T) { + var installerJobObj k8sbatch.Job + var installerSlice = ReturnJobSlices(t, pegaHelmChartPath, options) + helm.UnmarshalK8SYaml(t, installerSlice[1], &installerJobObj) + VerifyPegaJob(t, options, &installerJobObj, pegaJob{"pega-db-install", []string{}, "pega-install-environment-config"}) +} + +// TestInstallDeployActions - Test all objects deployed for install-deploy action with the values as provided in default values.yaml +func TestInstallDeployActions(t *testing.T) { + VerifyInstallDeployActionSkippedTemplates(t) + VerifyInstallDeployActionInstallerJob(t) + VerifyInstallerConfigMaps(t, options, pegaHelmChartPath) + VerifyInstallEnvConfig(t, options, pegaHelmChartPath) + VerifyInstallerRoleBinding(t, options, pegaHelmChartPath) + VerifyInstallerRole(t, options, pegaHelmChartPath) + VerifyPegaStandardTierDeployment(t, pegaHelmChartPath, options, []string{"wait-for-pegainstall", "wait-for-pegasearch", "wait-for-cassandra"}) +} diff --git a/terratest/src/test/install_test.go b/terratest/src/test/install_test.go new file mode 100644 index 000000000..ae44c7a9d --- /dev/null +++ b/terratest/src/test/install_test.go @@ -0,0 +1,81 @@ +package test + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + k8sbatch 
"k8s.io/api/batch/v1" +) + +const pegaHelmChartPath = "../../../charts/pega" +const dbConfFileLocation = "../../../charts/pega/charts/installer/config" + +// set action execute to install +var options = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "install", + "cassandra.enabled": "false", + "global.provider": "k8s", + }, +} + +// VerifyInstallActionSkippedTemplates - Tests all the skipped templates for action install. These templates not supposed to be rendered for install action. +func VerifyInstallActionSkippedTemplates(t *testing.T) { + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + + output := helm.RenderTemplate(t, options, helmChartPath, []string{ + "templates/pega-action-validate.yaml", + "templates/pega-environment-config.yaml", + "templates/pega-tier-config.yaml", + "templates/pega-tier-deployment.yaml", + "templates/pega-tier-hpa.yaml", + "templates/pega-tier-ingress.yaml", + "templates/pega-tier-service.yaml", + "charts/installer/templates/pega-installer-role.yaml", + "charts/installer/templates/pega-installer-status-rolebinding.yaml", + }) + + var emptyObjects appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &emptyObjects) + + // assert that above templates are not rendered + require.Empty(t, emptyObjects) +} + +// VerifyInstallActionInstallJob - Tests Install job yaml rendered with the values as provided in default values.yaml +func VerifyInstallActionInstallJob(t *testing.T) { + var upgradeJobObj k8sbatch.Job + var upgradeSlice = ReturnJobSlices(t, pegaHelmChartPath, options) + helm.UnmarshalK8SYaml(t, upgradeSlice[1], &upgradeJobObj) + VerifyPegaJob(t, options, &upgradeJobObj, pegaJob{"pega-db-install", []string{}, "pega-install-environment-config"}) +} + +//TestInstallActions - Test all objects deployed for install action with the values as provided in default values.yaml +func TestInstallActions(t *testing.T) { + VerifyInstallActionSkippedTemplates(t) + 
VerifyInstallActionInstallJob(t) + VerifyInstallEnvConfig(t, options, pegaHelmChartPath) + VerfiyRegistrySecret(t, pegaHelmChartPath, options) + VerifyCredentialsSecret(t, pegaHelmChartPath, options) + VerifyInstallerConfigMaps(t, options, pegaHelmChartPath) +} + +//TestDBConfFiles - Test all the files in "../../../charts/pega/charts/installer/config" folder where DB Conf files are present +func TestDBConfFiles(t *testing.T) { + actuallist, _ := ioutil.ReadDir(dbConfFileLocation) + require.Equal(t, 12, len(actuallist)) + + names := []string{"DB2SiteDependent.properties", "db2zos.conf", "migrateSystem.properties.tmpl", "mssql.conf", "oracledate.conf", "postgres.conf", "prbootstrap.properties.tmpl", "prconfig.xml.tmpl", + "prlog4j2.xml", "prpcUtils.properties.tmpl", "setupDatabase.properties.tmpl", "udb.conf"} + + require.Equal(t, len(names), len(actuallist)) + + for i, v := range actuallist { + require.Equal(t, names[i], v.Name()) + } +} diff --git a/terratest/src/test/installer_utility.go b/terratest/src/test/installer_utility.go new file mode 100644 index 000000000..56a056f8b --- /dev/null +++ b/terratest/src/test/installer_utility.go @@ -0,0 +1,190 @@ +package test + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + k8sbatch "k8s.io/api/batch/v1" + k8score "k8s.io/api/core/v1" + k8srbac "k8s.io/api/rbac/v1" +) + +type pegaJob struct { + name string + initContainers []string + configMapName string +} + +// ReturnJobSlices - returns string array of rendered yaml sepearted by delimiter as "---" +func ReturnJobSlices(t *testing.T, pegaHelmChartPath string, options *helm.Options) []string { + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + + installerJob := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-installer-job.yaml"}) + + installerSlice := strings.Split(installerJob, "---") + return 
installerSlice +} + +// VerifyPegaJob - Tests installer jobs rendered with the values as provided in default values.yaml +func VerifyPegaJob(t *testing.T, options *helm.Options, installerJobObj *k8sbatch.Job, expectedJob pegaJob) { + installerJobSpec := installerJobObj.Spec.Template.Spec + installerJobConatiners := installerJobObj.Spec.Template.Spec.Containers + + var containerPort int32 = 8080 + + require.Equal(t, installerJobSpec.Volumes[0].Name, "pega-volume-credentials") + require.Equal(t, installerJobSpec.Volumes[0].VolumeSource.Secret.SecretName, "pega-credentials-secret") + require.Equal(t, installerJobSpec.Volumes[0].VolumeSource.Secret.DefaultMode, volumeDefaultModePtr) + require.Equal(t, installerJobSpec.Volumes[1].Name, "pega-volume-installer") + require.Equal(t, installerJobSpec.Volumes[1].VolumeSource.ConfigMap.LocalObjectReference.Name, "pega-installer-config") + require.Equal(t, installerJobSpec.Volumes[1].VolumeSource.ConfigMap.DefaultMode, volumeDefaultModePtr) + + require.Equal(t, installerJobConatiners[0].Name, expectedJob.name) + require.Equal(t, "YOUR_INSTALLER_IMAGE:TAG", installerJobConatiners[0].Image) + require.Equal(t, installerJobConatiners[0].Ports[0].ContainerPort, containerPort) + require.Equal(t, installerJobConatiners[0].VolumeMounts[0].Name, "pega-volume-installer") + require.Equal(t, installerJobConatiners[0].VolumeMounts[0].MountPath, "/opt/pega/config") + require.Equal(t, installerJobConatiners[0].VolumeMounts[1].Name, "pega-volume-credentials") + require.Equal(t, installerJobConatiners[0].VolumeMounts[1].MountPath, "/opt/pega/secrets") + require.Equal(t, installerJobConatiners[0].EnvFrom[0].ConfigMapRef.LocalObjectReference.Name, expectedJob.configMapName) + + require.Equal(t, installerJobSpec.ImagePullSecrets[0].Name, "pega-registry-secret") + + require.Equal(t, installerJobSpec.RestartPolicy, k8score.RestartPolicy("Never")) + + actualInitContainers := installerJobSpec.InitContainers + count := len(actualInitContainers) + 
actualInitContainerNames := make([]string, count) + for i := 0; i < count; i++ { + actualInitContainerNames[i] = actualInitContainers[i].Name + } + + require.Equal(t, expectedJob.initContainers, actualInitContainerNames) + VerifyInitContinerData(t, actualInitContainers, options) +} + +// VerifyUpgradeEnvConfig - Tests upgrade environment config rendered with the values as provided in default values.yaml +func VerifyUpgradeEnvConfig(t *testing.T, options *helm.Options, pegaHelmChartPath string) { + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + // pega-install-environment-config.yaml + upgradeEnvConfig := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-upgrade-environment-config.yaml"}) + var upgradeEnvConfigMap k8score.ConfigMap + helm.UnmarshalK8SYaml(t, upgradeEnvConfig, &upgradeEnvConfigMap) + + upgradeEnvConfigData := upgradeEnvConfigMap.Data + + require.Equal(t, upgradeEnvConfigData["DB_TYPE"], "YOUR_DATABASE_TYPE") + require.Equal(t, upgradeEnvConfigData["JDBC_URL"], "YOUR_JDBC_URL") + require.Equal(t, upgradeEnvConfigData["JDBC_CLASS"], "YOUR_JDBC_DRIVER_CLASS") + require.Equal(t, upgradeEnvConfigData["JDBC_DRIVER_URI"], "YOUR_JDBC_DRIVER_URI") + require.Equal(t, upgradeEnvConfigData["RULES_SCHEMA"], "YOUR_RULES_SCHEMA") + require.Equal(t, upgradeEnvConfigData["DATA_SCHEMA"], "YOUR_DATA_SCHEMA") + require.Equal(t, upgradeEnvConfigData["CUSTOMERDATA_SCHEMA"], "") + require.Equal(t, upgradeEnvConfigData["UPGRADE_TYPE"], "in-place") + require.Equal(t, upgradeEnvConfigData["MULTITENANT_SYSTEM"], "false") + require.Equal(t, upgradeEnvConfigData["BYPASS_UDF_GENERATION"], "true") + require.Equal(t, upgradeEnvConfigData["ZOS_PROPERTIES"], "/opt/pega/config/DB2SiteDependent.properties") + require.Equal(t, upgradeEnvConfigData["DB2ZOS_UDF_WLM"], "") + require.Equal(t, upgradeEnvConfigData["TARGET_RULES_SCHEMA"], "") + require.Equal(t, upgradeEnvConfigData["TARGET_ZOS_PROPERTIES"], 
"/opt/pega/config/DB2SiteDependent.properties") + require.Equal(t, upgradeEnvConfigData["MIGRATION_DB_LOAD_COMMIT_RATE"], "100") + require.Equal(t, upgradeEnvConfigData["UPDATE_EXISTING_APPLICATIONS"], "false") + require.Equal(t, upgradeEnvConfigData["UPDATE_APPLICATIONS_SCHEMA"], "false") + require.Equal(t, upgradeEnvConfigData["RUN_RULESET_CLEANUP"], "false") + require.Equal(t, upgradeEnvConfigData["REBUILD_INDEXES"], "false") + require.Equal(t, upgradeEnvConfigData["DISTRIBUTION_KIT_URL"], "") +} + +// VerifyInstallEnvConfig - Tests Installer environment config rendered with the values as provided in default values.yaml +func VerifyInstallEnvConfig(t *testing.T, options *helm.Options, pegaHelmChartPath string) { + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + installEnvConfig := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-install-environment-config.yaml"}) + var installEnvConfigMap k8score.ConfigMap + helm.UnmarshalK8SYaml(t, installEnvConfig, &installEnvConfigMap) + + installEnvConfigData := installEnvConfigMap.Data + require.Equal(t, installEnvConfigData["DB_TYPE"], "YOUR_DATABASE_TYPE") + require.Equal(t, installEnvConfigData["JDBC_URL"], "YOUR_JDBC_URL") + require.Equal(t, installEnvConfigData["JDBC_CLASS"], "YOUR_JDBC_DRIVER_CLASS") + require.Equal(t, installEnvConfigData["JDBC_DRIVER_URI"], "YOUR_JDBC_DRIVER_URI") + require.Equal(t, installEnvConfigData["RULES_SCHEMA"], "YOUR_RULES_SCHEMA") + require.Equal(t, installEnvConfigData["DATA_SCHEMA"], "YOUR_DATA_SCHEMA") + require.Equal(t, installEnvConfigData["CUSTOMERDATA_SCHEMA"], "") + require.Equal(t, installEnvConfigData["SYSTEM_NAME"], "pega") + require.Equal(t, installEnvConfigData["PRODUCTION_LEVEL"], "2") + require.Equal(t, installEnvConfigData["MULTITENANT_SYSTEM"], "false") + require.Equal(t, "ADMIN_PASSWORD", installEnvConfigData["ADMIN_PASSWORD"]) + require.Equal(t, "", 
installEnvConfigData["STATIC_ASSEMBLER"]) + require.Equal(t, installEnvConfigData["BYPASS_UDF_GENERATION"], "true") + require.Equal(t, installEnvConfigData["BYPASS_TRUNCATE_UPDATESCACHE"], "false") + require.Equal(t, installEnvConfigData["JDBC_CUSTOM_CONNECTION"], "") + require.Equal(t, installEnvConfigData["MAX_IDLE"], "5") + require.Equal(t, installEnvConfigData["MAX_WAIT"], "-1") + require.Equal(t, installEnvConfigData["MAX_ACTIVE"], "10") + require.Equal(t, installEnvConfigData["ZOS_PROPERTIES"], "/opt/pega/config/DB2SiteDependent.properties") + require.Equal(t, installEnvConfigData["DB2ZOS_UDF_WLM"], "") + require.Equal(t, installEnvConfigData["DISTRIBUTION_KIT_URL"], "") + require.Equal(t, installEnvConfigData["ACTION"], options.SetValues["global.actions.execute"]) + require.Equal(t, "", installEnvConfigData["DISTRIBUTION_KIT_URL"]) + +} + +// VerifyInstallerRoleBinding - Tests Installer role binding rendered with the values as provided in default values.yaml +func VerifyInstallerRoleBinding(t *testing.T, options *helm.Options, pegaHelmChartPath string) { + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + + installerRoleBinding := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-installer-status-rolebinding.yaml"}) + var installerRoleBindingObj k8srbac.RoleBinding + helm.UnmarshalK8SYaml(t, installerRoleBinding, &installerRoleBindingObj) + require.Equal(t, installerRoleBindingObj.RoleRef.APIGroup, "rbac.authorization.k8s.io") + require.Equal(t, installerRoleBindingObj.RoleRef.Kind, "Role") + require.Equal(t, installerRoleBindingObj.RoleRef.Name, "jobs-reader") + + require.Equal(t, installerRoleBindingObj.Subjects[0].Kind, "ServiceAccount") + require.Equal(t, installerRoleBindingObj.Subjects[0].Name, "default") + require.Equal(t, installerRoleBindingObj.Subjects[0].Namespace, "default") +} + +// VerifyInstallerRole - Tests Installer role rendered 
with the values as provided in default values.yaml +func VerifyInstallerRole(t *testing.T, options *helm.Options, pegaHelmChartPath string) { + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + + deployRole := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-installer-role.yaml"}) + var deployRoleObj k8srbac.Role + helm.UnmarshalK8SYaml(t, deployRole, &deployRoleObj) + require.Equal(t, deployRoleObj.Rules[0].APIGroups, []string{"", "batch", "extensions", "apps"}) + require.Equal(t, deployRoleObj.Rules[0].Resources, []string{"jobs", "deployments", "statefulsets"}) + require.Equal(t, deployRoleObj.Rules[0].Verbs, []string{"get", "watch", "list"}) +} + +// VerifyInstallerConfigMaps - Tests Installer configuration rendered with the values as provided in default values.yaml +func VerifyInstallerConfigMaps(t *testing.T, options *helm.Options, pegaHelmChartPath string) { + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(pegaHelmChartPath) + require.NoError(t, err) + + installerConfig := helm.RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-installer-config.yaml"}) + var installConfigMap k8score.ConfigMap + helm.UnmarshalK8SYaml(t, installerConfig, &installConfigMap) + + installConfigData := installConfigMap.Data + + compareConfigMapData(t, installConfigData["prconfig.xml.tmpl"], "data/expectedPrconfig.xml") + compareConfigMapData(t, installConfigData["setupDatabase.properties.tmpl"], "data/expectedSetupdatabase.properties") + compareConfigMapData(t, installConfigData["prbootstrap.properties.tmpl"], "data/expectedPRbootstrap.properties") + compareConfigMapData(t, installConfigData["migrateSystem.properties.tmpl"], "data/expectedMigrateSystem.properties.tmpl") + compareConfigMapData(t, installConfigData["prlog4j2.xml"], "data/expectedPRlog4j2.xml") + compareConfigMapData(t, 
installConfigData["prpcUtils.properties.tmpl"], "data/expectedPRPCUtils.properties.tmpl") +} diff --git a/terratest/src/test/invalidAction_test.go b/terratest/src/test/invalidAction_test.go new file mode 100644 index 000000000..6a4dbbceb --- /dev/null +++ b/terratest/src/test/invalidAction_test.go @@ -0,0 +1,52 @@ +package test + +import ( + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" +) + +const PegaHelmChartPath = "../../../charts/pega" + +// TestInvalidAction - Tests in valid action correctly rendering error +func TestInvalidAction(t *testing.T) { + t.Parallel() + + // set action execute to install + var Invalidoptions = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "invalid-action", + "global.provider": "openshift", + }, + } + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + deployment, err := helm.RenderTemplateE(t, Invalidoptions, helmChartPath, []string{"templates/pega-action-validate.yaml"}) + + require.Error(t, err) + require.Contains(t, string(deployment), "Action value is not correct") + +} + +// TestValidAction - Tests valid action +func TestValidAction(t *testing.T) { + t.Parallel() + // set action execute to install + var Invalidoptions = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "deploy", + "global.provider": "openshift", + }, + } + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + deployment, err := helm.RenderTemplateE(t, Invalidoptions, helmChartPath, []string{"templates/pega-action-validate.yaml"}) + require.NoError(t, err) + require.NotContains(t, string(deployment), "Action value is not correct") +} diff --git a/terratest/src/test/openshift_test.go b/terratest/src/test/openshift_test.go new file mode 100644 index 000000000..918d895e1 --- /dev/null +++ 
b/terratest/src/test/openshift_test.go @@ -0,0 +1,30 @@ +package test + +import ( + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" +) + +const PegaHelmChartPath = "../../../charts/pega" + +// set action execute to install +var options = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "deploy", + "global.provider": "openshift", + }, +} + +// TestOpenshiftPegaTierDeployment - Test case to verify the standard pega tier deployment in Openshift. +// Standard tier deployment includes web deployment, batch deployment, stream statefulset, search service, hpa, rolling update, web services, ingresses and config maps +func TestOpenshiftPegaTierDeployment(t *testing.T) { + t.Parallel() + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + VerifyPegaStandardTierDeployment(t, helmChartPath, options, []string{"wait-for-pegasearch", "wait-for-cassandra"}) +} diff --git a/terratest/src/test/upgrade_deploy_test.go b/terratest/src/test/upgrade_deploy_test.go new file mode 100644 index 000000000..00ac1a705 --- /dev/null +++ b/terratest/src/test/upgrade_deploy_test.go @@ -0,0 +1,67 @@ +package test + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + k8sbatch "k8s.io/api/batch/v1" +) + +const pegaHelmChartPath = "../../../charts/pega" + +var options = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "upgrade-deploy", + "global.provider": "k8s", + }, +} + +// VerifyUpgradeDeployActionShouldNotRenderDeployments - Tests all the skipped templates for action upgrade-deploy. These templates not supposed to be rendered for upgrade-deploy action. 
+func VerifyUpgradeActionSkippedTemplates(t *testing.T) { + t.Parallel() + output := helm.RenderTemplate(t, options, pegaHelmChartPath, []string{ + "templates/pega-action-validate.yaml", + "charts/installer/templates/pega-install-environment-config.yaml", + }) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + // assert that above templates are not rendered + require.Empty(t, deployment) +} + +// ValidateUpgradeJobs - Tests Upgrade jobs yaml rendered with the values as provided in default values.yaml for action upgrade-deploy +func ValidateUpgradeJobs(t *testing.T) { + var installerJobObj k8sbatch.Job + var installerSlice = ReturnJobSlices(t, pegaHelmChartPath, options) + println(len(installerSlice)) + var expectedJob pegaJob + for index, installerInfo := range installerSlice { + if index >= 1 && index <= 3 { + if index == 1 { + expectedJob = pegaJob{"pega-pre-upgrade", []string{}, "pega-upgrade-environment-config"} + } else if index == 2 { + expectedJob = pegaJob{"pega-db-upgrade", []string{"wait-for-pre-dbupgrade"}, "pega-upgrade-environment-config"} + } else if index == 3 { + expectedJob = pegaJob{"pega-post-upgrade", []string{"wait-for-pegaupgrade", "wait-for-rolling-updates"}, "pega-upgrade-environment-config"} + } + + helm.UnmarshalK8SYaml(t, installerInfo, &installerJobObj) + VerifyPegaJob(t, options, &installerJobObj, expectedJob) + } + + } +} + +// TestUpgradeDeployActions - Test all objects deployed for upgrade-deploy action with the values as provided in default values.yaml +func TestUpgradeDeployActions(t *testing.T) { + VerifyUpgradeActionSkippedTemplates(t) + ValidateUpgradeJobs(t) + VerifyUpgradeEnvConfig(t, options, pegaHelmChartPath) + VerifyInstallerConfigMaps(t, options, pegaHelmChartPath) + VerifyInstallerRoleBinding(t, options, pegaHelmChartPath) + VerifyInstallerRole(t, options, pegaHelmChartPath) + VerifyPegaStandardTierDeployment(t, pegaHelmChartPath, options, []string{"wait-for-pegaupgrade"}) +} diff 
--git a/terratest/src/test/upgrade_test.go b/terratest/src/test/upgrade_test.go new file mode 100644 index 000000000..7fb06b196 --- /dev/null +++ b/terratest/src/test/upgrade_test.go @@ -0,0 +1,64 @@ +package test + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + k8sbatch "k8s.io/api/batch/v1" +) + +// Path to the helm chart we will test +const pegaHelmChartPath = "../../../charts/pega" + +// set action execute to install +var options = &helm.Options{ + SetValues: map[string]string{ + "global.actions.execute": "upgrade", + "cassandra.enabled": "false", + "global.provider": "k8s", + }, +} + +// VerifyUpgradeActionShouldNotRenderDeployments - Tests all the skipped templates for action upgrade. These templates not supposed to be rendered for upgrade action. +func VerifyUpgradeActionSkippedTemplates(t *testing.T) { + output := helm.RenderTemplate(t, options, pegaHelmChartPath, []string{ + "templates/pega-action-validate.yaml", + "charts/installer/templates/pega-installer-role.yaml", + "templates/pega-environment-config.yaml", + "charts/installer/templates/pega-installer-status-rolebinding.yaml", + "charts/pegasearch/templates/pega-search-deployment.yaml", + "charts/pegasearch/templates/pega-search-service.yaml", + "charts/pegasearch/templates/pega-search-transport-service.yaml", + "charts/installer/templates/pega-install-environment-config.yaml", + "templates/pega-tier-config.yaml", + "templates/pega-tier-deployment.yaml", + "templates/pega-tier-hpa.yaml", + "templates/pega-tier-ingress.yaml", + "templates/pega-tier-service.yaml", + }) + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + // assert that above templates are not rendered + require.Empty(t, deployment) +} + +// VerifyUpgradeActionInstallJob - Tests upgrade job yaml rendered with the values as provided in default values.yaml +func VerifyUpgradeActionInstallJob(t *testing.T) { 
+ var upgradeJobObj k8sbatch.Job + var upgradeSlice = ReturnJobSlices(t, pegaHelmChartPath, options) + helm.UnmarshalK8SYaml(t, upgradeSlice[1], &upgradeJobObj) + VerifyPegaJob(t, options, &upgradeJobObj, pegaJob{"pega-db-upgrade", []string{}, "pega-upgrade-environment-config"}) +} + +//TestUpgradeActions - Test all objects deployed for upgrade action with the values as provided in default values.yaml +func TestUpgradeActions(t *testing.T) { + VerifyUpgradeActionSkippedTemplates(t) + VerifyUpgradeActionInstallJob(t) + VerifyUpgradeEnvConfig(t, options, pegaHelmChartPath) + VerfiyRegistrySecret(t, pegaHelmChartPath, options) + VerifyCredentialsSecret(t, pegaHelmChartPath, options) + VerifyInstallerConfigMaps(t, options, pegaHelmChartPath) +}