diff --git a/.github/workflows/github-actions-build.yml b/.github/workflows/github-actions-build.yml index cf0a40bad..71b62af76 100644 --- a/.github/workflows/github-actions-build.yml +++ b/.github/workflows/github-actions-build.yml @@ -2,7 +2,7 @@ name: Pega Chart Build env: HELM_URL: https://get.helm.sh - HELM_TGZ: helm-v3.11.3-linux-amd64.tar.gz + HELM_TGZ: helm-v3.14.3-linux-amd64.tar.gz YAMLLINT_VERSION: 1.34.0 GO_VERSION: 1.21.6 diff --git a/charts/addons/Chart.yaml b/charts/addons/Chart.yaml index 2d1a299f9..17a145cea 100644 --- a/charts/addons/Chart.yaml +++ b/charts/addons/Chart.yaml @@ -3,4 +3,4 @@ apiVersion: v1 appVersion: "1.0" description: A Helm chart for Kubernetes name: addons -version: "3.23.0" +version: "3.24.0" diff --git a/charts/backingservices/Chart.yaml b/charts/backingservices/Chart.yaml index 4d08f63a6..925f4717b 100644 --- a/charts/backingservices/Chart.yaml +++ b/charts/backingservices/Chart.yaml @@ -17,4 +17,4 @@ description: Helm Chart to provision the latest Search and Reporting Service (SR # The chart version: Pega provides this as a useful way to track changes you make to this chart. # As a best practice, you should increment the version number each time you make changes to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: "3.23.0" +version: "3.24.0" diff --git a/charts/backingservices/charts/constellation-messaging/README.md b/charts/backingservices/charts/constellation-messaging/README.md index 492ecbf97..ded3b2f74 100644 --- a/charts/backingservices/charts/constellation-messaging/README.md +++ b/charts/backingservices/charts/constellation-messaging/README.md @@ -17,4 +17,5 @@ Complete information on the design of the service including architecture, scalab | `imagePullSecretNames` | List pre-existing secrets to be used for pulling docker images. | | `pegaMessagingPort` | Defines the port used by the Service. | | `pegaMessagingTargetPort` | Defines the port used by the Pod and Container. | +| `affinity` | Define pod affinity so that it is restricted to run on particular node(s), or to prefer to run on particular nodes. | | `ingress` | Allows optional configuration of a domain name, ingressClass, and annotations. An ingress will be provisioned if a domain name is supplied. Due to the diversity of network configurations, ingress vendors, and TLS requirements it may be necessary to define your ingress separately from this chart. 
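For reference, a minimal sketch of how the new `affinity` value might be supplied to the constellation-messaging subchart from the backingservices values file. The node-affinity term and the `enabled`/`name` keys mirror the `--set` values used by the TestConstellationMessagingDeploymentWithAffinity terratest case added later in this change; treat the fragment as illustrative rather than a required configuration.

```yaml
constellation-messaging:
  enabled: true
  name: constellation-messaging
  # Restrict the messaging pods to Linux nodes; this block is rendered into the
  # Deployment pod spec by the new podAffinity helper.
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                  - linux
```

The same nodeSelector term (`kubernetes.io/os In [linux]`) is what the added terratest cases assert against across the messaging, constellation, SRS, Hazelcast, installer, search, and tier deployments.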
diff --git a/charts/backingservices/charts/constellation-messaging/templates/_helpers.tpl b/charts/backingservices/charts/constellation-messaging/templates/_helpers.tpl new file mode 100644 index 000000000..8ba76dfa7 --- /dev/null +++ b/charts/backingservices/charts/constellation-messaging/templates/_helpers.tpl @@ -0,0 +1,6 @@ +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/backingservices/charts/constellation-messaging/templates/messaging-deployment.yaml b/charts/backingservices/charts/constellation-messaging/templates/messaging-deployment.yaml index c5769e954..d76b3064a 100644 --- a/charts/backingservices/charts/constellation-messaging/templates/messaging-deployment.yaml +++ b/charts/backingservices/charts/constellation-messaging/templates/messaging-deployment.yaml @@ -23,10 +23,19 @@ spec: - name: c11n-messaging imagePullPolicy: {{ .Values.imagePullPolicy }} image: {{ .Values.image }} + resources: + {{ if .Values.resources }} + {{ toYaml .Values.resources | nindent 10 }} + {{- end }} + securityContext: + {{- if .Values.securityContext }} + {{ toYaml .Values.securityContext | nindent 10 }} + {{- end }} args: - --max-semi-space-size=1024 - port={{ .Values.pegaMessagingTargetPort }} - path=/c11n-messaging ports: - containerPort: {{ .Values.pegaMessagingTargetPort }} +{{- include "podAffinity" .Values | indent 6 }} {{ end }} diff --git a/charts/backingservices/charts/constellation-messaging/values.yaml b/charts/backingservices/charts/constellation-messaging/values.yaml index 4212f295b..6c28d7ade 100644 --- a/charts/backingservices/charts/constellation-messaging/values.yaml +++ b/charts/backingservices/charts/constellation-messaging/values.yaml @@ -13,6 +13,20 @@ imagePullPolicy: Always pegaMessagingPort: 3000 pegaMessagingTargetPort: 3000 +# set memoryRequest & memoryLimit to Limit memory usage for container https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory +# resources: +# requests: +# memory: # ex: 128Mi for MB or 2Gi for GB +# limits: +# memory: # ex: 256Mi for MB or 4Gi for GB +securityContext: + seccompProfile: + # set seccompProfile to RuntimeDefault to not disable default seccomp profile https://kubernetes.io/docs/tutorials/security/seccomp/ + type: Unconfined # RuntimeDefault + # DO NOT CHANGE readOnlyRootFilesystem VALUE to true, C11N MESSAGING WON'T WORK AS EXPECTED + readOnlyRootFilesystem: false + # set allowPrivilegeEscalation to false to Restrict container from acquiring additional privileges https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + allowPrivilegeEscalation: true # false serviceType: NodePort # An ingress will be provisioned if a hostname is defined, or omitted if the hostname is empty. diff --git a/charts/backingservices/charts/constellation/README.md b/charts/backingservices/charts/constellation/README.md index 0d94b16cc..d999dc80a 100644 --- a/charts/backingservices/charts/constellation/README.md +++ b/charts/backingservices/charts/constellation/README.md @@ -48,6 +48,7 @@ The values.yaml file provides configuration options to define the values for the | `ingressAnnotations` | Specify additional annotations to add to the ingress. | | `customerAssetVolumeClaimName` | Specify the volume claim name to be used for storing customer assets. | | `imagePullSecretNames` | Specify a list of existing ImagePullSecrets to be added to the Deployment. 
| +| `affinity` | Specify the pod affinity so that pods are restricted to run on particular node(s), or to prefer to run on particular nodes. | `docker`.`registry`.`url` | Specify the image registry url. | | `docker`.`registry`.`username` | Specify the username for the docker registry. | | `docker`.`registry`.`password` | Specify the password for the docker registry. | diff --git a/charts/backingservices/charts/constellation/templates/_helpers.tpl b/charts/backingservices/charts/constellation/templates/_helpers.tpl index 829116a44..91da6041d 100644 --- a/charts/backingservices/charts/constellation/templates/_helpers.tpl +++ b/charts/backingservices/charts/constellation/templates/_helpers.tpl @@ -8,3 +8,10 @@ {{- end }} {{- define "deploymentName" }}{{ $deploymentNamePrefix := "constellation" }}{{ if (.Values.deployment) }}{{ if (.Values.deployment.name) }}{{ $deploymentNamePrefix = .Values.deployment.name }}{{ end }}{{ end }}{{ $deploymentNamePrefix }}{{- end }} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/backingservices/charts/constellation/templates/clln-deployment.yaml b/charts/backingservices/charts/constellation/templates/clln-deployment.yaml index da2b74117..ac27fe34b 100644 --- a/charts/backingservices/charts/constellation/templates/clln-deployment.yaml +++ b/charts/backingservices/charts/constellation/templates/clln-deployment.yaml @@ -31,6 +31,14 @@ spec: containers: - name: constellation image: {{ .Values.docker.constellation.image }} + resources: + {{ if .Values.resources }} + {{ toYaml .Values.resources | nindent 10 }} + {{- end }} + securityContext: + {{- if .Values.securityContext }} + {{ toYaml .Values.securityContext | nindent 10 }} + {{- end }} {{ if .Values.customerAssetVolumeClaimName }} volumeMounts: - name: constellation-appstatic-assets @@ -61,4 +69,5 @@ spec: port: 3000 ports: - containerPort: 3000 +{{- include "podAffinity" .Values | indent 6 }} {{ end }} \ No newline at end of file diff --git a/charts/backingservices/charts/constellation/templates/clln-ingress.yaml b/charts/backingservices/charts/constellation/templates/clln-ingress.yaml index 6560502d5..7552e7caf 100644 --- a/charts/backingservices/charts/constellation/templates/clln-ingress.yaml +++ b/charts/backingservices/charts/constellation/templates/clln-ingress.yaml @@ -18,6 +18,9 @@ metadata: {{- toYaml .Values.ingressAnnotations | nindent 4 }} {{ end }} spec: +{{- if .Values.ingressClassName }} + ingressClassName: {{ .Values.ingressClassName }} +{{- end }} rules: - host: {{ .Values.domainName }} http: diff --git a/charts/backingservices/charts/constellation/values.yaml b/charts/backingservices/charts/constellation/values.yaml index 5ad744d64..3d0b60a36 100644 --- a/charts/backingservices/charts/constellation/values.yaml +++ b/charts/backingservices/charts/constellation/values.yaml @@ -25,6 +25,22 @@ docker: image: cirrus-docker.jfrog.io/constellation-appstatic-service/docker-image:1.0.8-20221228123724 logLevel: info urlPath: /c11n +# ingressClassName is optional and will be included if defined. 
+ingressClassName: +# set memoryRequest & memoryLimit to Limit memory usage for container https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory +# resources: +# requests: +# memory: # ex: 128Mi for MB or 2Gi for GB +# limits: +# memory: # ex: 256Mi for MB or 4Gi for GB +securityContext: + seccompProfile: + # set seccompProfile to RuntimeDefault to not disable default seccomp profile https://kubernetes.io/docs/tutorials/security/seccomp/ + type: Unconfined # RuntimeDefault + # DO NOT CHANGE readOnlyRootFilesystem VALUE to true, C11N SERVICE WON'T WORK AS EXPECTED + readOnlyRootFilesystem: false + # set allowPrivilegeEscalation to false to Restrict container from acquiring additional privileges https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + allowPrivilegeEscalation: true # false replicas: 1 livenessProbe: initialDelaySeconds: 5 diff --git a/charts/backingservices/charts/srs/README.md b/charts/backingservices/charts/srs/README.md index 6562cf24e..341dc8b3b 100644 --- a/charts/backingservices/charts/srs/README.md +++ b/charts/backingservices/charts/srs/README.md @@ -108,7 +108,7 @@ To deploy Pega Platform with the SRS backing service, the SRS helm chart require |-----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `enabled` | Enable the Search and Reporting Service deployment as a backing service. Set this parameter to `true` to use SRS. | | `deploymentName` | Specify the name of your SRS cluster. Your deployment creates resources prefixed with this string. This is also the service name for the SRS. | -| `srsRuntime` | Use this section to define specific resource configuration options like image, replica count, cpu and memory resource settings in the SRS. The default minimum required number of replicas is 2, but as a best practice, deploy 3 replicas to maintain high availability. | +| `srsRuntime` | Use this section to define specific resource configuration options like image, replica count, pod affinity, cpu and memory resource settings in the SRS. The default minimum required number of replicas is 2, but as a best practice, deploy 3 replicas to maintain high availability. | | `busybox` | When provisioning an internally managed Elasticsearch cluster, you can customize the location and pull policy of the Alpine image used during the deployment process by specifying `busybox.image` and `busybox.imagePullPolicy`. | | `elasticsearch` | Define the elasticsearch cluster configurations. The [Elasticsearch](https://github.com/helm/charts/tree/master/stable/elasticsearch/values.yaml) chart defines the values for Elasticsearch provisioning in the SRS cluster. For internally provisioned Elasticsearch the default version is set to `7.17.9`. Set the `elasticsearch.imageTag` parameter in values.yaml to `7.16.3` to use this supported version in the SRS cluster. | | `k8sProvider` | Specify your Kubernetes provider name. Supported values are [`eks`, `aks`, `minikube`, `gke`, `openshift`, `pks`].. 
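Similarly, a hedged sketch for the SRS subchart: the new `podAffinity` helper is invoked with `.Values.srsRuntime` in the SRS deployment template, so the affinity block sits under `srsRuntime`, matching the `srs.srsRuntime.affinity...` path set in the new TestSRSDeploymentWithAffinity test. The surrounding keys shown here mirror that test's values and are for illustration only.

```yaml
srs:
  enabled: true
  deploymentName: test-srs
  srsRuntime:
    # Affinity is read from srsRuntime and rendered into the SRS Deployment pod spec.
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: kubernetes.io/os
                  operator: In
                  values:
                    - linux
```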
diff --git a/charts/backingservices/charts/srs/templates/_helpers.tpl b/charts/backingservices/charts/srs/templates/_helpers.tpl index b1336ccb3..55f6cc2ae 100644 --- a/charts/backingservices/charts/srs/templates/_helpers.tpl +++ b/charts/backingservices/charts/srs/templates/_helpers.tpl @@ -222,3 +222,10 @@ Network policy: `openshift-dns` for openshift cluster, `kube-dns | core-dns` for port: 8080 {{- end -}} {{- end -}} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} diff --git a/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml b/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml index 5bc225ca3..fb1470588 100644 --- a/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml +++ b/charts/backingservices/charts/srs/templates/srsservice_deployment.yaml @@ -147,4 +147,5 @@ spec: name: srs-elastic-credentials key: password {{ end }} +{{- include "podAffinity" .Values.srsRuntime | indent 6 }} {{ end }} \ No newline at end of file diff --git a/charts/pega/Chart.yaml b/charts/pega/Chart.yaml index aa3a93961..d2b895238 100644 --- a/charts/pega/Chart.yaml +++ b/charts/pega/Chart.yaml @@ -1,7 +1,7 @@ --- apiVersion: v1 name: pega -version: "3.23.0" +version: "3.24.0" description: Pega installation on kubernetes keywords: - pega diff --git a/charts/pega/README.md b/charts/pega/README.md index 1b5e7fb11..3145b60e1 100644 --- a/charts/pega/README.md +++ b/charts/pega/README.md @@ -709,6 +709,26 @@ tier: : ``` +### Pod affinity + +You may optionally configure the pod affinity so that it is restricted to run on particular node(s), or to prefer to run on particular nodes. Pod affinity may be specified by using the `affinity` element for a given `tier`. See the official [Kubernetes Documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + +Example: + +```yaml +tier: + - name: my-tier + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux +``` + ### Pega configuration files While Pega includes default configuration files in the Helm charts, the charts provide extension points to override the defaults with additional customizations. To change the configuration file, specify the replacement implementation to be injected into a ConfigMap. @@ -961,6 +981,7 @@ Parameter | Description | Default value `set_vm_max_map_count` | Elasticsearch uses a **mmapfs** directory by default to store its indices. The default operating system limits on mmap counts is likely to be too low, which may result in out of memory exceptions. An init container is provided to set the value correctly, but this action requires privileged access. If privileged access is not allowed in your environment, you may increase this setting manually by updating the `vm.max_map_count` setting in **/etc/sysctl.conf** according to the Elasticsearch documentation and can set this parameter to `false` to disable the init container. For more information, see the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html). | `true` `set_data_owner_on_startup` | Set to true to enable an init container that runs a chown command on the mapped volume at startup to reset the owner of the ES data to the current user. 
This is needed if a random user is used to run the pod, but also requires privileges to change the ownership of files. | `false` `podAnnotations` | Configurable annotations applied to all Elasticsearch pods. | {} +`affinity` | You may optionally configure the pod affinity so that it is restricted to run on particular node(s), or to prefer to run on particular nodes. | `""` Additional env settings supported by Elasticsearch may be specified in a `custom.env` block as shown in the example below. @@ -1064,6 +1085,7 @@ Parameter | Description | Default value `image` | Reference the `platform/installer` Docker image that you downloaded and pushed to your Docker registry that your deployment can access. | `YOUR_INSTALLER_IMAGE:TAG` `imagePullPolicy` | Specify when to pull an image. | `IfNotPresent` `adminPassword` | Specify a temporary, initial password to log into the Pega application. This will need to be changed at first login. The adminPassword value cannot start with "@". | `"ADMIN_PASSWORD"` +`affinity` | Configures policy to assign the pods to the nodes. See the official [Kubernetes Documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). | `""` `upgrade.upgradeType:` |Specify the type of process, applying a patch or upgrading. | See the next table for details. `upgrade.upgradeSteps:` |Specify the steps of a `custom` upgrade process that you want to complete. For `zero-downtime`, `out-of-place-rules`, `out-of-place-data`, or `in-place` upgrades, leave this parameter empty. | `upgrade.targetRulesSchema:` |Specify the name of the schema you created the process creates for the new rules schema. | `""` @@ -1203,7 +1225,7 @@ Pega Infinity version | Clustering Service version | Description --- | --- | --- < 8.6 | NA | Clustering Service is not supported for releases 8.5 or below \>= 8.6 && < 8.8 | \= 1.0.5 | Pega Infinity 8.6.x and 8.7.x supports using a Pega-provided `platform-services/clustering-service` Docker Image that provides a clustering service version 1.0.3 or later. -\>= 8.8 | \= 1.3.3 | Pega Infinity 8.8 and later supports using a Pega-provided `platform-services/clustering-service` Docker Image that provides a clustering service version 1.3.0 or later. +\>= 8.8 | \= 1.3.x | Pega Infinity 8.8 and later supports using a Pega-provided `platform-services/clustering-service` Docker Image that provides a clustering service version 1.3.0 or later. As a best practice, use the latest available release of the clustering service. #### Configuration Settings @@ -1223,6 +1245,7 @@ Parameter | Description | Default value `hazelcast.username` | Configures the username to be used in a client-server Hazelcast model for authentication between the nodes in the Pega deployment and the nodes in the Hazelcast cluster. This parameter configures the username in Hazelcast cluster and your Pega nodes so authentication occurs automatically. | `""` `hazelcast.password` | Configures the password to be used in a client-server Hazelcast model for authentication between the nodes in the Pega deployment and the nodes in the Hazelcast cluster. This parameter configures the password credential in Hazelcast cluster and your Pega nodes so authentication occurs automatically. | `""` `hazelcast.external_secret_name` | If you configured a secret in an external secrets operator, enter the secret name. For details, see [this section](#optional-support-for-providing-credentialscertificates-using-external-secrets-operator). 
| `""` +`hazelcast.affinity` | Configures policy to assign the pods to the nodes. See the official [Kubernetes Documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). | `""` #### Example ```yaml @@ -1410,3 +1433,28 @@ behavior: scaleUp: stabilizationWindowSeconds: << provide scaleUp stabilization window in seconds >> ``` + +### Custom Ports + +You can optionally specify custom ports for deployment tier. You can specify custom ports for your tiers as shown in the example below: + +```yaml +tier: + - name: my-tier + custom: + ports: + - name: + containerPort: +``` + +You can optionally specify custom ports for tier specific service. You can specify custom ports for your service as shown in the example below: +```yaml +tier: + - name: my-tier + service: + customServicePorts: + - name: + port: + targetPort: + +``` diff --git a/charts/pega/charts/hazelcast/templates/_supplemental.tpl b/charts/pega/charts/hazelcast/templates/_supplemental.tpl index 20e01f3ef..01add432e 100644 --- a/charts/pega/charts/hazelcast/templates/_supplemental.tpl +++ b/charts/pega/charts/hazelcast/templates/_supplemental.tpl @@ -11,6 +11,7 @@ pega-db-secret-name pega-hz-secret-name deployDBSecret deployNonExtDBSecret +podAffinity secretResolver are copied from pega/templates/_helpers.tpl because helm lint requires charts to render standalone. See: https://github.com/helm/helm/issues/11260 for more details. */}} @@ -115,4 +116,11 @@ false name: {{ .extSecretName }} {{- end -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/hazelcast/templates/clustering-service-deployment.yaml b/charts/pega/charts/hazelcast/templates/clustering-service-deployment.yaml index 789dfbeb3..5a593bdfc 100644 --- a/charts/pega/charts/hazelcast/templates/clustering-service-deployment.yaml +++ b/charts/pega/charts/hazelcast/templates/clustering-service-deployment.yaml @@ -73,4 +73,5 @@ spec: {{- include "hazelcastVolumeTemplate" . | indent 6 }} imagePullSecrets: {{- include "imagePullSecrets" . | indent 6 }} +{{- include "podAffinity" .Values | indent 6 }} {{ end }} diff --git a/charts/pega/charts/hazelcast/templates/pega-hz-deployment.yaml b/charts/pega/charts/hazelcast/templates/pega-hz-deployment.yaml index 3abb7e813..494c116ca 100644 --- a/charts/pega/charts/hazelcast/templates/pega-hz-deployment.yaml +++ b/charts/pega/charts/hazelcast/templates/pega-hz-deployment.yaml @@ -69,4 +69,5 @@ spec: {{- include "hazelcastVolumeTemplate" . | indent 6 }} imagePullSecrets: {{- include "imagePullSecrets" . 
| indent 6 }} +{{- include "podAffinity" .Values | indent 6 }} {{ end }} diff --git a/charts/pega/charts/installer/templates/_pega-installer-job.tpl b/charts/pega/charts/installer/templates/_pega-installer-job.tpl index 5471ae9c0..df0834346 100644 --- a/charts/pega/charts/installer/templates/_pega-installer-job.tpl +++ b/charts/pega/charts/installer/templates/_pega-installer-job.tpl @@ -188,5 +188,6 @@ spec: restartPolicy: Never imagePullSecrets: {{- include "imagePullSecrets" .root | indent 6 }} +{{- include "podAffinity" .root.Values | indent 6 }} --- {{- end -}} \ No newline at end of file diff --git a/charts/pega/charts/installer/templates/_supplemental.tpl b/charts/pega/charts/installer/templates/_supplemental.tpl index 20e01f3ef..01add432e 100644 --- a/charts/pega/charts/installer/templates/_supplemental.tpl +++ b/charts/pega/charts/installer/templates/_supplemental.tpl @@ -11,6 +11,7 @@ pega-db-secret-name pega-hz-secret-name deployDBSecret deployNonExtDBSecret +podAffinity secretResolver are copied from pega/templates/_helpers.tpl because helm lint requires charts to render standalone. See: https://github.com/helm/helm/issues/11260 for more details. */}} @@ -115,4 +116,11 @@ false name: {{ .extSecretName }} {{- end -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/installer/values.yaml b/charts/pega/charts/installer/values.yaml index fb7ad3d1a..579c44dd1 100644 --- a/charts/pega/charts/installer/values.yaml +++ b/charts/pega/charts/installer/values.yaml @@ -83,14 +83,17 @@ upgrade: # Specify automaticResumeEnabled to support resuming rules_upgrade from point of failure. You can use this functionality only when you use "custom" upgradeType. automaticResumeEnabled: "false" -# Memory and CPU settings for installer +# Memory, CPU, and ephemeral storage settings for installer +# Ephemeral storage recommended size is 10G resources: requests: memory: "5Gi" cpu: 1 + # ephemeralStorage: "" limits: memory: "6Gi" cpu: 2 + # ephemeralStorage: "" shareProcessNamespace: false diff --git a/charts/pega/charts/pegasearch/templates/_supplemental.tpl b/charts/pega/charts/pegasearch/templates/_supplemental.tpl index 20e01f3ef..01add432e 100644 --- a/charts/pega/charts/pegasearch/templates/_supplemental.tpl +++ b/charts/pega/charts/pegasearch/templates/_supplemental.tpl @@ -11,6 +11,7 @@ pega-db-secret-name pega-hz-secret-name deployDBSecret deployNonExtDBSecret +podAffinity secretResolver are copied from pega/templates/_helpers.tpl because helm lint requires charts to render standalone. See: https://github.com/helm/helm/issues/11260 for more details. */}} @@ -115,4 +116,11 @@ false name: {{ .extSecretName }} {{- end -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml b/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml index 389bff149..055f81a89 100644 --- a/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml +++ b/charts/pega/charts/pegasearch/templates/pega-search-deployment.yaml @@ -117,6 +117,7 @@ spec: mountPath: /usr/share/elasticsearch/data imagePullSecrets: {{- include "imagePullSecrets" . 
| indent 6 }} +{{- include "podAffinity" .Values | indent 6 }} volumeClaimTemplates: - metadata: name: esstorage diff --git a/charts/pega/templates/_helpers.tpl b/charts/pega/templates/_helpers.tpl index 5f3597921..74377ad14 100644 --- a/charts/pega/templates/_helpers.tpl +++ b/charts/pega/templates/_helpers.tpl @@ -530,4 +530,4 @@ servicePort: use-annotation - secret: name: {{ include "pega-diagnostic-secret-name" $}} -{{- end}} +{{- end}} \ No newline at end of file diff --git a/charts/pega/templates/_pega-deployment.tpl b/charts/pega/templates/_pega-deployment.tpl index 2266ce947..7be6b9ea4 100644 --- a/charts/pega/templates/_pega-deployment.tpl +++ b/charts/pega/templates/_pega-deployment.tpl @@ -337,6 +337,7 @@ spec: # If the image is in a protected registry, you must specify a secret to access it. imagePullSecrets: {{- include "imagePullSecrets" .root | indent 6 }} +{{- include "podAffinity" .node | indent 6 }} {{- if (.node.volumeClaimTemplate) }} volumeClaimTemplates: - metadata: diff --git a/charts/pega/templates/_pega-service.tpl b/charts/pega/templates/_pega-service.tpl index 5c7dc31e0..e987d973f 100644 --- a/charts/pega/templates/_pega-service.tpl +++ b/charts/pega/templates/_pega-service.tpl @@ -60,6 +60,9 @@ spec: - name: https port: {{ .node.service.tls.port }} targetPort: {{ .node.service.tls.targetPort }} +{{- end }} +{{- if .node.service.customServicePorts }} +{{ toYaml .node.service.customServicePorts | indent 2 }} {{- end }} selector: app: {{ .name }} diff --git a/charts/pega/templates/_supplemental.tpl b/charts/pega/templates/_supplemental.tpl index 20e01f3ef..01add432e 100644 --- a/charts/pega/templates/_supplemental.tpl +++ b/charts/pega/templates/_supplemental.tpl @@ -11,6 +11,7 @@ pega-db-secret-name pega-hz-secret-name deployDBSecret deployNonExtDBSecret +podAffinity secretResolver are copied from pega/templates/_helpers.tpl because helm lint requires charts to render standalone. See: https://github.com/helm/helm/issues/11260 for more details. */}} @@ -115,4 +116,11 @@ false name: {{ .extSecretName }} {{- end -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{- define "podAffinity" }} +{{- if .affinity }} +affinity: +{{- toYaml .affinity | nindent 2 }} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/pega/values-large.yaml b/charts/pega/values-large.yaml index 1a819a83e..602b3a783 100644 --- a/charts/pega/values-large.yaml +++ b/charts/pega/values-large.yaml @@ -26,7 +26,7 @@ global: # Add krb5.conf file content here. # Feature is used for Decisioning data flows to fetch data from Kafka or HBase streams - kerberos: + kerberos: {} # If a storage class to be passed to the VolumeClaimTemplates in search and stream pods, it can be specified here: storageClassName: "" diff --git a/charts/pega/values-minimal.yaml b/charts/pega/values-minimal.yaml index 8445d910a..e61d2db8f 100755 --- a/charts/pega/values-minimal.yaml +++ b/charts/pega/values-minimal.yaml @@ -22,7 +22,7 @@ global: # Add krb5.conf file content here. # Feature is used for Decisioning data flows to fetch data from Kafka or HBase streams - kerberos: + kerberos: {} # If a storage class to be passed to the VolumeClaimTemplates in search and stream pods, it can be specified here: storageClassName: "" diff --git a/charts/pega/values.yaml b/charts/pega/values.yaml index 10b2a3a11..246fd39de 100644 --- a/charts/pega/values.yaml +++ b/charts/pega/values.yaml @@ -26,7 +26,7 @@ global: # Add krb5.conf file content here. 
# Feature is used for Decisioning data flows to fetch data from Kafka or HBase streams - kerberos: + kerberos: {} # If a storage class to be passed to the VolumeClaimTemplates in search and stream pods, it can be specified here: storageClassName: "" @@ -163,6 +163,13 @@ global: # loadBalancerSourceRanges: # - "123.123.123.0/24" # - "128.128.128.64/32" + + # Define custom ports for service here. If you want to use the custom ports for other services, please use the same configuration for those services. + # customServicePorts: + # - name: + # port: + # targetPort: + # To configure TLS between the ingress/load balancer and the backend, set the following: tls: enabled: false diff --git a/terratest/src/test/backingservices/constellation-messaging-deployment_test.go b/terratest/src/test/backingservices/constellation-messaging-deployment_test.go new file mode 100644 index 000000000..868a8dba6 --- /dev/null +++ b/terratest/src/test/backingservices/constellation-messaging-deployment_test.go @@ -0,0 +1,35 @@ +package backingservices + +import ( + "testing" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" +) + +func TestConstellationMessagingDeploymentWithAffinity(t *testing.T) { + + var affintiyBasePath = "constellation-messaging.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]." + + helmChartParser := NewHelmConfigParser( + NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ + "constellation-messaging.enabled": "true", + "constellation-messaging.name": "constellation-messaging", + affintiyBasePath + "key": "kubernetes.io/os", + affintiyBasePath + "operator": "In", + affintiyBasePath + "values[0]": "linux", + }, + []string{"charts/constellation-messaging/templates/messaging-deployment.yaml"}), + ) + + var cllnMessagingDeploymentObj appsv1.Deployment + helmChartParser.getResourceYAML(SearchResourceOption{ + Name: "constellation-messaging", + Kind: "Deployment", + }, &cllnMessagingDeploymentObj) + + deploymentAffinity := cllnMessagingDeploymentObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + require.Equal(t, "kubernetes.io/os", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key) + require.Equal(t, "In", string(deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator)) + require.Equal(t, "linux", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0]) +} diff --git a/terratest/src/test/backingservices/constellation-static-depoyment_test.go b/terratest/src/test/backingservices/constellation-static-depoyment_test.go new file mode 100644 index 000000000..bb8ee18ac --- /dev/null +++ b/terratest/src/test/backingservices/constellation-static-depoyment_test.go @@ -0,0 +1,34 @@ +package backingservices + +import ( + "testing" + + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" +) + +func TestConstellationStaticDeploymentWithAffinity(t *testing.T) { + + var affintiyBasePath = "constellation.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]." 
+ + helmChartParser := NewHelmConfigParser( + NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ + "constellation.enabled": "true", + affintiyBasePath + "key": "kubernetes.io/os", + affintiyBasePath + "operator": "In", + affintiyBasePath + "values[0]": "linux", + }, + []string{"charts/constellation/templates/clln-deployment.yaml"}), + ) + + var cllnDeploymentObj appsv1.Deployment + helmChartParser.getResourceYAML(SearchResourceOption{ + Name: "constellation", + Kind: "Deployment", + }, &cllnDeploymentObj) + + deploymentAffinity := cllnDeploymentObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + require.Equal(t, "kubernetes.io/os", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key) + require.Equal(t, "In", string(deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator)) + require.Equal(t, "linux", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0]) +} diff --git a/terratest/src/test/backingservices/srs-deployment_test.go b/terratest/src/test/backingservices/srs-deployment_test.go index b7c046254..482068615 100644 --- a/terratest/src/test/backingservices/srs-deployment_test.go +++ b/terratest/src/test/backingservices/srs-deployment_test.go @@ -1,29 +1,30 @@ package backingservices import ( + "strings" + "testing" + "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" k8score "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" - "testing" - "strings" ) -func TestSRSDeployment(t *testing.T){ +func TestSRSDeployment(t *testing.T) { helmChartParser := NewHelmConfigParser( NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ - "srs.enabled": "true", - "srs.deploymentName": "test-srs", - "global.imageCredentials.registry": "docker-registry.io", - "srs.srsRuntime.replicaCount": "1", - "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:latest", - "srs.srsRuntime.env.AuthEnabled": "false", - "srs.srsRuntime.env.OAuthPublicKeyURL": "", - "srs.srsStorage.tls.enabled": "true", + "srs.enabled": "true", + "srs.deploymentName": "test-srs", + "global.imageCredentials.registry": "docker-registry.io", + "srs.srsRuntime.replicaCount": "1", + "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:latest", + "srs.srsRuntime.env.AuthEnabled": "false", + "srs.srsRuntime.env.OAuthPublicKeyURL": "", + "srs.srsStorage.tls.enabled": "true", "srs.srsStorage.basicAuthentication.enabled": "false", }, - []string{"charts/srs/templates/srsservice_deployment.yaml"}), + []string{"charts/srs/templates/srsservice_deployment.yaml"}), ) var srsDeploymentObj appsv1.Deployment @@ -40,7 +41,7 @@ func TestSRSDeployment(t *testing.T){ "false", "", false, - podResources{ "1300m", "2Gi", "650m", "2Gi"}, + podResources{"1300m", "2Gi", "650m", "2Gi"}, esDomain{ domain: "elasticsearch-master.default.svc", port: "9200", @@ -50,32 +51,32 @@ func TestSRSDeployment(t *testing.T){ }) } -func TestSRSDeploymentVariables(t *testing.T){ +func TestSRSDeploymentVariables(t *testing.T) { helmChartParser := NewHelmConfigParser( NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ - "srs.enabled": "true", - "srs.deploymentName": "test-srs-dev", - "global.imageCredentials.registry": "docker-registry.io", - "srs.srsRuntime.replicaCount": "3", - "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:1.0.0", - "srs.srsRuntime.imagePullSecretNames": "{secret1, secret2}", - "srs.srsRuntime.env.AuthEnabled": "true", - 
"srs.srsRuntime.env.OAuthPublicKeyURL": "https://acme.authenticator.com/OAuthPublicKeyURL", - "srs.srsRuntime.resources.limits.cpu": "2", - "srs.srsRuntime.resources.limits.memory": "4Gi", - "srs.srsRuntime.resources.requests.cpu": "1", - "srs.srsRuntime.resources.requests.memory": "2Gi", - "srs.srsStorage.provisionInternalESCluster": "false", - "srs.srsStorage.tls.enabled": "false", - "srs.srsStorage.domain": "es-id.managed.cloudiest.io", - "srs.srsStorage.port": "443", - "srs.srsStorage.protocol": "https", - "srs.srsStorage.awsIAM.region": "us-east-1", - "srs.srsStorage.requireInternetAccess": "true", + "srs.enabled": "true", + "srs.deploymentName": "test-srs-dev", + "global.imageCredentials.registry": "docker-registry.io", + "srs.srsRuntime.replicaCount": "3", + "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:1.0.0", + "srs.srsRuntime.imagePullSecretNames": "{secret1, secret2}", + "srs.srsRuntime.env.AuthEnabled": "true", + "srs.srsRuntime.env.OAuthPublicKeyURL": "https://acme.authenticator.com/OAuthPublicKeyURL", + "srs.srsRuntime.resources.limits.cpu": "2", + "srs.srsRuntime.resources.limits.memory": "4Gi", + "srs.srsRuntime.resources.requests.cpu": "1", + "srs.srsRuntime.resources.requests.memory": "2Gi", + "srs.srsStorage.provisionInternalESCluster": "false", + "srs.srsStorage.tls.enabled": "false", + "srs.srsStorage.domain": "es-id.managed.cloudiest.io", + "srs.srsStorage.port": "443", + "srs.srsStorage.protocol": "https", + "srs.srsStorage.awsIAM.region": "us-east-1", + "srs.srsStorage.requireInternetAccess": "true", "srs.srsStorage.basicAuthentication.enabled": "false", }, - []string{"charts/srs/templates/srsservice_deployment.yaml"}), + []string{"charts/srs/templates/srsservice_deployment.yaml"}), ) var srsDeploymentObj appsv1.Deployment @@ -97,36 +98,36 @@ func TestSRSDeploymentVariables(t *testing.T){ domain: "es-id.managed.cloudiest.io", port: "443", protocol: "https", - region: "us-east-1", + region: "us-east-1", }, true, }) } -func TestSRSDeploymentVariablesDefaultInternetEgress(t *testing.T){ +func TestSRSDeploymentVariablesDefaultInternetEgress(t *testing.T) { helmChartParser := NewHelmConfigParser( NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ - "srs.enabled": "true", - "srs.deploymentName": "test-srs-dev", - "global.imageCredentials.registry": "docker-registry.io", - "srs.srsRuntime.replicaCount": "3", - "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:1.0.0", - "srs.srsRuntime.imagePullSecretNames": "{secret1, secret2}", - "srs.srsRuntime.env.AuthEnabled": "true", - "srs.srsRuntime.env.OAuthPublicKeyURL": "https://acme.authenticator.com/OAuthPublicKeyURL", - "srs.srsRuntime.resources.limits.cpu": "2", - "srs.srsRuntime.resources.limits.memory": "4Gi", - "srs.srsRuntime.resources.requests.cpu": "1", - "srs.srsRuntime.resources.requests.memory": "2Gi", - "srs.srsStorage.provisionInternalESCluster": "false", - "srs.srsStorage.domain": "es-id.managed.cloudiest.io", - "srs.srsStorage.port": "443", - "srs.srsStorage.protocol": "https", - "srs.srsStorage.tls.enabled": "false", + "srs.enabled": "true", + "srs.deploymentName": "test-srs-dev", + "global.imageCredentials.registry": "docker-registry.io", + "srs.srsRuntime.replicaCount": "3", + "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:1.0.0", + "srs.srsRuntime.imagePullSecretNames": "{secret1, secret2}", + "srs.srsRuntime.env.AuthEnabled": "true", + "srs.srsRuntime.env.OAuthPublicKeyURL": 
"https://acme.authenticator.com/OAuthPublicKeyURL", + "srs.srsRuntime.resources.limits.cpu": "2", + "srs.srsRuntime.resources.limits.memory": "4Gi", + "srs.srsRuntime.resources.requests.cpu": "1", + "srs.srsRuntime.resources.requests.memory": "2Gi", + "srs.srsStorage.provisionInternalESCluster": "false", + "srs.srsStorage.domain": "es-id.managed.cloudiest.io", + "srs.srsStorage.port": "443", + "srs.srsStorage.protocol": "https", + "srs.srsStorage.tls.enabled": "false", "srs.srsStorage.basicAuthentication.enabled": "false", }, - []string{"charts/srs/templates/srsservice_deployment.yaml"}), + []string{"charts/srs/templates/srsservice_deployment.yaml"}), ) var srsDeploymentObj appsv1.Deployment @@ -153,8 +154,46 @@ func TestSRSDeploymentVariablesDefaultInternetEgress(t *testing.T){ }) } +func TestSRSDeploymentWithAffinity(t *testing.T) { + + var affintiyBasePath = "srs.srsRuntime.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]." + + helmChartParser := NewHelmConfigParser( + NewHelmTestFromTemplate(t, helmChartRelativePath, map[string]string{ + "srs.enabled": "true", + "srs.deploymentName": "test-srs", + "global.imageCredentials.registry": "docker-registry.io", + "srs.srsRuntime.replicaCount": "1", + "srs.srsRuntime.srsImage": "platform-services/search-n-reporting-service:latest", + "srs.srsRuntime.env.AuthEnabled": "false", + "srs.srsRuntime.env.OAuthPublicKeyURL": "", + "srs.srsStorage.tls.enabled": "true", + "srs.srsStorage.basicAuthentication.enabled": "false", + affintiyBasePath + "key": "kubernetes.io/os", + affintiyBasePath + "operator": "In", + affintiyBasePath + "values[0]": "linux", + }, + []string{"charts/srs/templates/srsservice_deployment.yaml"}), + ) + + var srsDeploymentObj appsv1.Deployment + helmChartParser.getResourceYAML(SearchResourceOption{ + Name: "test-srs", + Kind: "Deployment", + }, &srsDeploymentObj) + + deploymentSpec := srsDeploymentObj.Spec.Template.Spec + require.Equal(t, deploymentSpec.Containers[0].Name, "srs-service") + require.Equal(t, deploymentSpec.Containers[0].Image, "platform-services/search-n-reporting-service:latest") + require.Equal(t, deploymentSpec.Containers[0].Ports[0].Name, "srs-port") + deploymentAffinity := deploymentSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + require.Equal(t, "kubernetes.io/os", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key) + require.Equal(t, "In", string(deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator)) + require.Equal(t, "linux", deploymentAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0]) +} + func VerifySRSDeployment(t *testing.T, deploymentObj appsv1.Deployment, expectedDeployment srsDeployment) { - require.Equal(t, expectedDeployment.replicaCount, *deploymentObj.Spec.Replicas ) + require.Equal(t, expectedDeployment.replicaCount, *deploymentObj.Spec.Replicas) require.Equal(t, expectedDeployment.appName, deploymentObj.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) if expectedDeployment.internetEgress { require.Equal(t, "true", deploymentObj.Spec.Selector.MatchLabels["networking/allow-internet-egress"]) @@ -171,7 +210,7 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec srsDeploy require.Equal(t, pod.Containers[0].Ports[0].Name, "srs-port") require.Equal(t, pod.Containers[0].Ports[0].ContainerPort, int32(8080)) var envIndex int32 = 0 - require.Equal(t, "ELASTICSEARCH_HOST", pod.Containers[0].Env[envIndex].Name ) + require.Equal(t, "ELASTICSEARCH_HOST", 
pod.Containers[0].Env[envIndex].Name) require.Equal(t, expectedSpec.elasticsearchEndPoint.domain, pod.Containers[0].Env[envIndex].Value) envIndex++ require.Equal(t, "ELASTICSEARCH_PORT", pod.Containers[0].Env[envIndex].Name) @@ -185,39 +224,39 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec srsDeploy var authProvider string = pod.Containers[0].Env[envIndex].Value envIndex++ if strings.EqualFold("aws-iam", authProvider) { - require.Equal(t, "ELASTICSEARCH_REGION", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, expectedSpec.elasticsearchEndPoint.region, pod.Containers[0].Env[envIndex].Value) - envIndex++ + require.Equal(t, "ELASTICSEARCH_REGION", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, expectedSpec.elasticsearchEndPoint.region, pod.Containers[0].Env[envIndex].Value) + envIndex++ } if strings.EqualFold("basic-authentication", authProvider) { - require.Equal(t, "ELASTICSEARCH_USERNAME", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) - require.Equal(t, "username", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) - envIndex++ - require.Equal(t, "ELASTICSEARCH_PASSWORD", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) - require.Equal(t, "password", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) - envIndex++ + require.Equal(t, "ELASTICSEARCH_USERNAME", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) + require.Equal(t, "username", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) + envIndex++ + require.Equal(t, "ELASTICSEARCH_PASSWORD", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) + require.Equal(t, "password", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) + envIndex++ } if strings.EqualFold("tls", authProvider) { - require.Equal(t, "ELASTICSEARCH_USERNAME", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) - require.Equal(t, "username", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) - envIndex++ - require.Equal(t, "ELASTICSEARCH_PASSWORD", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) - require.Equal(t, "password", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) - envIndex++ - require.Equal(t, "PATH_TO_TRUSTSTORE", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "/usr/share/elastic-certificates.p12", pod.Containers[0].Env[envIndex].Value) - envIndex++ - require.Equal(t, "PATH_TO_KEYSTORE", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "", pod.Containers[0].Env[envIndex].Value) - envIndex++ + require.Equal(t, "ELASTICSEARCH_USERNAME", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) + require.Equal(t, "username", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) + envIndex++ + require.Equal(t, "ELASTICSEARCH_PASSWORD", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "srs-elastic-credentials", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Name) + 
require.Equal(t, "password", pod.Containers[0].Env[envIndex].ValueFrom.SecretKeyRef.Key) + envIndex++ + require.Equal(t, "PATH_TO_TRUSTSTORE", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "/usr/share/elastic-certificates.p12", pod.Containers[0].Env[envIndex].Value) + envIndex++ + require.Equal(t, "PATH_TO_KEYSTORE", pod.Containers[0].Env[envIndex].Name) + require.Equal(t, "", pod.Containers[0].Env[envIndex].Value) + envIndex++ } require.Equal(t, "APPLICATION_HOST", pod.Containers[0].Env[envIndex].Name) - require.Equal(t, "0.0.0.0", pod.Containers[0].Env[envIndex].Value) - envIndex++ + require.Equal(t, "0.0.0.0", pod.Containers[0].Env[envIndex].Value) + envIndex++ require.Equal(t, "APPLICATION_PORT", pod.Containers[0].Env[envIndex].Name) require.Equal(t, "8080", pod.Containers[0].Env[envIndex].Value) envIndex++ @@ -240,36 +279,39 @@ func VerifyDeployment(t *testing.T, pod *k8score.PodSpec, expectedSpec srsDeploy require.Equal(t, pod.Containers[0].ReadinessProbe.HTTPGet.Port, intstr.FromString("srs-port")) require.Equal(t, pod.Containers[0].ReadinessProbe.HTTPGet.Scheme, k8score.URIScheme("HTTP")) - require.Equal(t, pod.ImagePullSecrets[0].Name, expectedSpec.name + "-reg-secret") + require.Equal(t, pod.ImagePullSecrets[0].Name, expectedSpec.name+"-reg-secret") if expectedSpec.imagePullSecretNames { require.Equal(t, pod.ImagePullSecrets[1].Name, "secret1") require.Equal(t, pod.ImagePullSecrets[2].Name, "secret2") } + + podAffinity := pod.Affinity + require.Empty(t, podAffinity) } type srsDeployment struct { - name string - appName string - replicaCount int32 - imageURI string - authEnabled string - oauthPublicKeyURL string - internetEgress bool - podLimits podResources - elasticsearchEndPoint esDomain - imagePullSecretNames bool + name string + appName string + replicaCount int32 + imageURI string + authEnabled string + oauthPublicKeyURL string + internetEgress bool + podLimits podResources + elasticsearchEndPoint esDomain + imagePullSecretNames bool } type podResources struct { - cpuLimit string - memoryLimit string - cpuRequest string - memoryRequest string + cpuLimit string + memoryLimit string + cpuRequest string + memoryRequest string } type esDomain struct { - domain string - port string - protocol string - region string + domain string + port string + protocol string + region string } diff --git a/terratest/src/test/pega/clustering-service-deployment_test.go b/terratest/src/test/pega/clustering-service-deployment_test.go index 5d772a92a..271f92ecd 100644 --- a/terratest/src/test/pega/clustering-service-deployment_test.go +++ b/terratest/src/test/pega/clustering-service-deployment_test.go @@ -62,6 +62,8 @@ func VerifyClusteringServiceDeployment(t *testing.T, yamlContent string) { require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[0].MountPath, "/opt/hazelcast/logs") require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].Name, "hazelcast-volume-credentials") require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].MountPath, "/opt/hazelcast/secrets") + statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity + require.Empty(t, statefulsetAffinity) } } } @@ -98,3 +100,43 @@ func TestClusteringServiceDeploymentSecurityContext(t *testing.T) { } } } + +func TestClusteringServiceDeploymentWithAffinity(t *testing.T) { + var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"} + var supportedOperations = []string{"deploy", "install-deploy"} + + helmChartPath, err := filepath.Abs(PegaHelmChartPath) + require.NoError(t, err) + + 
var affintiyBasePath = "hazelcast.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]." + + for _, vendor := range supportedVendors { + + for _, operation := range supportedOperations { + + fmt.Println(vendor + "-" + operation) + + var options = &helm.Options{ + SetValues: map[string]string{ + "global.provider": vendor, + "global.actions.execute": operation, + "hazelcast.clusteringServiceEnabled": "true", + affintiyBasePath + "key": "kubernetes.io/os", + affintiyBasePath + "operator": "In", + affintiyBasePath + "values[0]": "linux", + }, + } + + yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/hazelcast/templates/clustering-service-deployment.yaml"}) + yamlSplit := strings.Split(yamlContent, "---") + var statefulsetObj appsv1beta2.StatefulSet + UnmarshalK8SYaml(t, yamlSplit[1], &statefulsetObj) + + statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + require.Equal(t, "kubernetes.io/os", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key) + require.Equal(t, "In", string(statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator)) + require.Equal(t, "linux", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0]) + + } + } +} diff --git a/terratest/src/test/pega/data/values_service_custom_ports.yaml b/terratest/src/test/pega/data/values_service_custom_ports.yaml new file mode 100644 index 000000000..329cd7fab --- /dev/null +++ b/terratest/src/test/pega/data/values_service_custom_ports.yaml @@ -0,0 +1,52 @@ +--- +global: + tier: + - name: "web" + nodeType: "WebUser" + requestor: + passivationTimeSec: 900 + service: + httpEnabled: true + port: 80 + targetPort: 8080 + tls: + enabled: false + external_secret_name: "" + keystore: + keystorepassword: + port: 443 + targetPort: 8443 + cacertificate: + certificateFile: + certificateKeyFile: + traefik: + enabled: false + serverName: "" + insecureSkipVerify: false + customServicePorts: + - name: port1 + port: 5005 + targetPort: 5005 + ingress: + domain: + tls: + enabled: true + certificate: + key: + cacertificate: + replicas: 1 + javaOpts: "" + pegaDiagnosticUser: "" + pegaDiagnosticPassword: "" + deploymentStrategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + livenessProbe: + port: 8081 + hpa: + enabled: true + pdb: + enabled: false + minAvailable: 1 diff --git a/terratest/src/test/pega/pega-hz-deployment_test.go b/terratest/src/test/pega/pega-hz-deployment_test.go index 43501dc6b..172b5dd4a 100644 --- a/terratest/src/test/pega/pega-hz-deployment_test.go +++ b/terratest/src/test/pega/pega-hz-deployment_test.go @@ -1,13 +1,14 @@ package pega import ( + "path/filepath" + "strings" + "testing" + "github.com/gruntwork-io/terratest/modules/helm" "github.com/stretchr/testify/require" appsv1beta2 "k8s.io/api/apps/v1beta2" "k8s.io/apimachinery/pkg/util/intstr" - "path/filepath" - "strings" - "testing" ) func TestHazelcastDeployment(t *testing.T) { @@ -58,6 +59,55 @@ func VerifyHazelcastDeployment(t *testing.T, yamlContent string) { require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[0].MountPath, "/opt/hazelcast/logs") require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].Name, "hazelcast-volume-credentials") require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[1].MountPath, "/opt/hazelcast/secrets") + statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity + require.Empty(t, statefulsetAffinity) + } 
+	}
+}
+
+func TestHazelcastDeploymentWithPodAffinity(t *testing.T) {
+	var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"}
+	var supportedOperations = []string{"deploy", "install-deploy"}
+
+	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+	require.NoError(t, err)
+
+	var affinityBasePath = "hazelcast.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]."
+
+	for _, vendor := range supportedVendors {
+
+		for _, operation := range supportedOperations {
+
+			var options = &helm.Options{
+				SetValues: map[string]string{
+					"global.provider":        vendor,
+					"global.actions.execute": operation,
+					"hazelcast.enabled":      "true",
+					affinityBasePath + "key":       "kubernetes.io/os",
+					affinityBasePath + "operator":  "In",
+					affinityBasePath + "values[0]": "linux",
+				},
+			}
+
+			yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/hazelcast/templates/pega-hz-deployment.yaml"})
+			VerifyHazelcastDeploymentWithAffinity(t, yamlContent, options)
+
+		}
+	}
+}
+
+func VerifyHazelcastDeploymentWithAffinity(t *testing.T, yamlContent string, options *helm.Options) {
+	var statefulsetObj appsv1beta2.StatefulSet
+	statefulSlice := strings.Split(yamlContent, "---")
+	for index, statefulInfo := range statefulSlice {
+		if index >= 1 {
+			UnmarshalK8SYaml(t, statefulInfo, &statefulsetObj)
+			require.Equal(t, *statefulsetObj.Spec.Replicas, int32(3))
+			require.Equal(t, statefulsetObj.Spec.ServiceName, "pega-hazelcast-service")
+			statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+			require.Equal(t, "kubernetes.io/os", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key)
+			require.Equal(t, "In", string(statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator))
+			require.Equal(t, "linux", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0])
 		}
 	}
 }
diff --git a/terratest/src/test/pega/pega-installer-job_test.go b/terratest/src/test/pega/pega-installer-job_test.go
index 0a892f9a9..5a43e97f4 100644
--- a/terratest/src/test/pega/pega-installer-job_test.go
+++ b/terratest/src/test/pega/pega-installer-job_test.go
@@ -106,6 +106,39 @@ func TestPegaInstallerJobWithNodeSelector(t *testing.T) {
 }
 
+func TestPegaInstallerJobWithAffinity(t *testing.T) {
+
+	var affinityBasePath = "installer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]."
+
+	var options = &helm.Options{
+		SetValues: map[string]string{
+			"global.deployment.name":        "install-ns",
+			"global.provider":               "k8s",
+			"global.actions.execute":        "install",
+			"installer.imagePullPolicy":     "Always",
+			"installer.upgrade.upgradeType": "zero-downtime",
+			affinityBasePath + "key":       "kubernetes.io/os",
+			affinityBasePath + "operator":  "In",
+			affinityBasePath + "values[0]": "linux",
+		},
+	}
+
+	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+	require.NoError(t, err)
+
+	yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/installer/templates/pega-installer-job.yaml"})
+	yamlSplit := strings.Split(yamlContent, "---")
+
+	var jobObj k8sbatch.Job
+	UnmarshalK8SYaml(t, yamlSplit[1], &jobObj)
+
+	jobAffinity := jobObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+
+	require.Equal(t, "kubernetes.io/os", jobAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key)
+	require.Equal(t, "In", string(jobAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator))
+	require.Equal(t, "linux", jobAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0])
+}
+
 func assertJob(t *testing.T, jobYaml string, expectedJob pegaDbJob, options *helm.Options, pullPolicy string) {
 	var jobObj k8sbatch.Job
 	UnmarshalK8SYaml(t, jobYaml, &jobObj)
diff --git a/terratest/src/test/pega/pega-search-deployment_test.go b/terratest/src/test/pega/pega-search-deployment_test.go
index 1096585f1..dfc87a802 100644
--- a/terratest/src/test/pega/pega-search-deployment_test.go
+++ b/terratest/src/test/pega/pega-search-deployment_test.go
@@ -1,48 +1,45 @@
 package pega
 
 import (
+	"path/filepath"
+	"testing"
+
 	"github.com/gruntwork-io/terratest/modules/helm"
 	"github.com/stretchr/testify/require"
 	appsv1beta2 "k8s.io/api/apps/v1beta2"
 	k8score "k8s.io/api/core/v1"
-	"path/filepath"
-	"testing"
 )
-
-
-func TestPegaSearchDeployment(t *testing.T){
-	var supportedVendors = []string{"k8s","openshift","eks","gke","aks","pks"}
-	var supportedOperations = []string{"deploy","install-deploy","upgrade-deploy"}
-	var deploymentNames = []string{"pega","myapp-dev"}
+func TestPegaSearchDeployment(t *testing.T) {
+	var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"}
+	var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"}
+	var deploymentNames = []string{"pega", "myapp-dev"}
 
 	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
 	require.NoError(t, err)
+	for _, vendor := range supportedVendors {
 
-	for _,vendor := range supportedVendors{
+		for _, operation := range supportedOperations {
 
-	for _,operation := range supportedOperations{
+			for _, depName := range deploymentNames {
 
-		for _, depName := range deploymentNames {
-
-		var options = &helm.Options{
-			SetValues: map[string]string{
-				"global.deployment.name": depName,
-				"global.provider":        vendor,
-				"global.actions.execute": operation,
+				var options = &helm.Options{
+					SetValues: map[string]string{
+						"global.deployment.name":        depName,
+						"global.provider":               vendor,
+						"global.actions.execute":        operation,
 						"installer.upgrade.upgradeType": "zero-downtime",
-				"global.storageClassName": "storage-class",
-			},
-		}
+						"global.storageClassName":       "storage-class",
+					},
+				}
 
-		yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/pegasearch/templates/pega-search-deployment.yaml"})
-		VerifySearchDeployment(t, yamlContent, options)
-		}
+				yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/pegasearch/templates/pega-search-deployment.yaml"})
+				VerifySearchDeployment(t, yamlContent, options)
+			}
 		}
 	}
-
 }
 
 func VerifySearchDeployment(t *testing.T, yamlContent string, options *helm.Options) {
@@ -58,4 +55,58 @@ func VerifySearchDeployment(t *testing.T, yamlContent string, options *helm.Opti
 	statefulsetSpec := statefulsetObj.Spec.Template.Spec
 	require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[0].Name, "esstorage")
 	require.Equal(t, statefulsetSpec.Containers[0].VolumeMounts[0].MountPath, "/usr/share/elasticsearch/data")
-}
\ No newline at end of file
+	statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity
+	require.Empty(t, statefulsetAffinity)
+}
+
+func TestPegaSearchDeploymentWithPodAffinity(t *testing.T) {
+	var supportedVendors = []string{"k8s", "eks", "gke", "aks", "pks"}
+	var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"}
+	var deploymentNames = []string{"pega", "myapp-dev"}
+
+	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+	require.NoError(t, err)
+
+	var affinityBasePath = "pegasearch.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0]."
+
+	for _, vendor := range supportedVendors {
+
+		for _, operation := range supportedOperations {
+
+			for _, depName := range deploymentNames {
+
+				var options = &helm.Options{
+					SetValues: map[string]string{
+						"global.deployment.name":        depName,
+						"global.provider":               vendor,
+						"global.actions.execute":        operation,
+						"installer.upgrade.upgradeType": "zero-downtime",
+						"global.storageClassName":       "storage-class",
+						affinityBasePath + "key":       "kubernetes.io/os",
+						affinityBasePath + "operator":  "In",
+						affinityBasePath + "values[0]": "linux",
+					},
+				}
+
+				yamlContent := RenderTemplate(t, options, helmChartPath, []string{"charts/pegasearch/templates/pega-search-deployment.yaml"})
+				VerifySearchDeploymentWithAffinity(t, yamlContent, options)
+			}
+		}
+	}
+}
+
+func VerifySearchDeploymentWithAffinity(t *testing.T, yamlContent string, options *helm.Options) {
+	var statefulsetObj appsv1beta2.StatefulSet
+	storageClassName := "storage-class"
+	UnmarshalK8SYaml(t, yamlContent, &statefulsetObj)
+	require.Equal(t, statefulsetObj.ObjectMeta.Name, getObjName(options, "-search"))
+	require.Equal(t, *statefulsetObj.Spec.Replicas, int32(1))
+	require.Equal(t, statefulsetObj.Spec.VolumeClaimTemplates[0].Name, "esstorage")
+	require.Equal(t, statefulsetObj.Spec.VolumeClaimTemplates[0].Spec.AccessModes[0], k8score.PersistentVolumeAccessMode("ReadWriteOnce"))
+	require.Equal(t, statefulsetObj.Spec.VolumeClaimTemplates[0].Spec.StorageClassName, &storageClassName)
+	require.Equal(t, statefulsetObj.Spec.ServiceName, getObjName(options, "-search"))
+	statefulsetAffinity := statefulsetObj.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+	require.Equal(t, "kubernetes.io/os", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key)
+	require.Equal(t, "In", string(statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator))
+	require.Equal(t, "linux", statefulsetAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values[0])
+}
diff --git a/terratest/src/test/pega/pega-tier-deployment_test.go b/terratest/src/test/pega/pega-tier-deployment_test.go
index de280bade..b45eeefbc 100644
--- a/terratest/src/test/pega/pega-tier-deployment_test.go
+++ b/terratest/src/test/pega/pega-tier-deployment_test.go
@@ -62,6 +62,49 @@ func assertStreamWithSorageClass(t *testing.T, streamYaml string, options *helm.
 	require.Equal(t, &storageClassName, statefulsetObj.Spec.VolumeClaimTemplates[0].Spec.StorageClassName)
 }
 
+func TestPegaTierDeploymentWithPodAffinity(t *testing.T) {
+	var supportedVendors = []string{"k8s", "eks", "gke", "aks", "pks"}
+	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+	var affinityBasePath = "global.tier[0].affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution."
+	require.NoError(t, err)
+
+	var depObjWeb appsv1.Deployment
+	var depObjBatch appsv1.Deployment
+	var depObjStream appsv1.Deployment
+
+	for _, vendor := range supportedVendors {
+		var options = &helm.Options{
+			SetValues: map[string]string{
+				"global.provider":               vendor,
+				"global.actions.execute":        "deploy",
+				"global.deployment.name":        "pega",
+				"installer.upgrade.upgradeType": "zero-downtime",
+				"global.tier[0].name":           "web",
+				"global.tier[1].name":           "batch",
+				"global.tier[2].name":           "stream",
+				affinityBasePath + "nodeSelectorTerms[0].matchExpressions[0].key":       "kubernetes.io/os",
+				affinityBasePath + "nodeSelectorTerms[0].matchExpressions[0].operator":  "In",
+				affinityBasePath + "nodeSelectorTerms[0].matchExpressions[0].values[0]": "linux",
+			},
+		}
+
+		yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-deployment.yaml"})
+		yamlSplit := strings.Split(yamlContent, "---")
+
+		UnmarshalK8SYaml(t, yamlSplit[1], &depObjWeb)
+		deploymentNodeAffinityWeb := depObjWeb.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+		require.Equal(t, "kubernetes.io/os", deploymentNodeAffinityWeb.NodeSelectorTerms[0].MatchExpressions[0].Key)
+		require.Equal(t, "In", string(deploymentNodeAffinityWeb.NodeSelectorTerms[0].MatchExpressions[0].Operator))
+		require.Equal(t, "linux", deploymentNodeAffinityWeb.NodeSelectorTerms[0].MatchExpressions[0].Values[0])
+		UnmarshalK8SYaml(t, yamlSplit[2], &depObjBatch)
+		deploymentAffinityBatch := depObjBatch.Spec.Template.Spec.Affinity
+		require.Empty(t, deploymentAffinityBatch)
+		UnmarshalK8SYaml(t, yamlSplit[3], &depObjStream)
+		deploymentAffinityStream := depObjStream.Spec.Template.Spec.Affinity
+		require.Empty(t, deploymentAffinityStream)
+	}
+}
+
 func TestPegaTierDeploymentWithFSGroup(t *testing.T) {
 	var supportedVendors = []string{"k8s", "eks", "gke", "aks", "pks"}
 	customFsGroups := map[string]int64{
diff --git a/terratest/src/test/pega/pega-tier-service_test.go b/terratest/src/test/pega/pega-tier-service_test.go
index 5dfdab380..db0c7f51a 100644
--- a/terratest/src/test/pega/pega-tier-service_test.go
+++ b/terratest/src/test/pega/pega-tier-service_test.go
@@ -88,3 +88,40 @@ type pegaServices struct {
 	Port       int32
 	TargetPort intstr.IntOrString
 }
+
+func TestPegaTierServiceWithCustomPorts(t *testing.T) {
+	var supportedVendors = []string{"k8s", "openshift", "eks", "gke", "aks", "pks"}
+	var supportedOperations = []string{"deploy", "install-deploy", "upgrade-deploy"}
+	var deploymentNames = []string{"pega", "myapp-dev"}
+
+	helmChartPath, err := filepath.Abs(PegaHelmChartPath)
+	require.NoError(t, err)
+	var serviceObj k8score.Service
+	for _, vendor := range supportedVendors {
+		for _, operation := range supportedOperations {
+			for _, depName := range deploymentNames {
+
+				var options = &helm.Options{
+					ValuesFiles: []string{"data/values_service_custom_ports.yaml"},
+					SetValues: map[string]string{
+						"global.deployment.name":        depName,
+						"global.provider":               vendor,
+						"global.actions.execute":        operation,
+						"installer.upgrade.upgradeType": "zero-downtime",
+					},
+				}
+				yamlContent := RenderTemplate(t, options, helmChartPath, []string{"templates/pega-tier-service.yaml"})
+				yamlSplit := strings.Split(yamlContent, "---")
+				UnmarshalK8SYaml(t, yamlSplit[1], &serviceObj)
+				ports := serviceObj.Spec.Ports
+				require.Equal(t, 2, len(ports))
+				require.Equal(t, "http", ports[0].Name)
+				require.Equal(t, int32(80), ports[0].Port)
+				require.Equal(t, int32(8080), ports[0].TargetPort.IntVal)
+				require.Equal(t, "port1", ports[1].Name)
+				require.Equal(t, int32(5005), ports[1].Port)
+				require.Equal(t, int32(5005), ports[1].TargetPort.IntVal)
+			}
+		}
+	}
+}
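
For context, the --set keys exercised by the affinity tests above (pegasearch.affinity..., hazelcast.affinity..., installer.affinity..., and global.tier[0].affinity...) all map onto the standard Kubernetes node-affinity structure. A values.yaml fragment equivalent to what the tests set would look roughly like the sketch below; this is illustrative only and not part of the diff, and the top-level key it nests under (pegasearch, hazelcast, installer, or a tier entry) depends on which chart section is being configured.

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/os
              operator: In
              values:
                - linux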