diff --git a/docs/snapshot/main/config.js b/docs/snapshot/main/config.js index 722be945..0d0271f6 100644 --- a/docs/snapshot/main/config.js +++ b/docs/snapshot/main/config.js @@ -1,9 +1,9 @@ function createConfig() { return { home: "docs/about/01_overview", - release: "3.4.0", + release: "3.4.1", releases: [ - "3.4.0" + "3.4.1" ], pathColors: { "*": "blue-grey" diff --git a/docs/snapshot/main/search-index.json b/docs/snapshot/main/search-index.json index 429296f0..282e505f 100644 --- a/docs/snapshot/main/search-index.json +++ b/docs/snapshot/main/search-index.json @@ -7,7 +7,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " The Coherence Operator is available as an image from the GitHub container registry ghcr.io/oracle/coherence-operator:3.4.0 that can easily be installed into a Kubernetes cluster. ", + "text": " The Coherence Operator is available as an image from the GitHub container registry ghcr.io/oracle/coherence-operator:3.4.1 that can easily be installed into a Kubernetes cluster. ", "title": "preambule" }, { @@ -32,7 +32,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " The Coherence Operator uses a single image, the Operator also runs as an init-container in the Coherence cluster Pods. ghcr.io/oracle/coherence-operator:3.4.0 - The Operator image. If no image is specified in the Coherence yaml, then the default Coherence image will also be used, ghcr.io/oracle/coherence-ce:22.06.8 - The default Coherence image. If using a private image registry then these images will all need to be pushed to that registry for the Operator to work. The default Coherence image may be omitted if all Coherence applications will use custom Coherence images. ", + "text": " The Coherence Operator uses a single image, the Operator also runs as an init-container in the Coherence cluster Pods. ghcr.io/oracle/coherence-operator:3.4.1 - The Operator image. If no image is specified in the Coherence yaml, then the default Coherence image will also be used, ghcr.io/oracle/coherence-ce:22.06.9 - The default Coherence image. If using a private image registry then these images will all need to be pushed to that registry for the Operator to work. The default Coherence image may be omitted if all Coherence applications will use custom Coherence images. ", "title": "Coherence Operator Images" }, { @@ -47,7 +47,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " If you want the default Coherence Operator installation then the simplest solution is use kubectl to apply the manifests from the Operator release. kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file. Because the coherence-operator.yaml manifest also creates the namespace, the corresponding kubectl delete command will remove the namespace and everything deployed to it ! If you do not want this behaviour you should edit the coherence-operator.yaml to remove the namespace section from the start of the file. 
Instead of using a hard coded version in the command above you can find the latest Operator version using curl : export VERSION=$(curl -s \\ https://api.github.com/repos/oracle/coherence-operator/releases/latest \\ | grep '\"name\": \"v' \\ | cut -d '\"' -f 4 \\ | cut -b 2-10) Then download with: kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/${VERSION}/coherence-operator.yaml Change the Operator Replica Count When installing with single manifest yaml file, the replica count can be changed by editing the yaml file itself to change the occurrence of replicas: 3 in the manifest yaml to replicas: 1 For example, this could be done using sed sed -i -e 's/replicas: 3/replicas: 1/g' coherence-operator.yaml Or on MacOS, where sed is slightly different: sed -i '' -e 's/replicas: 3/replicas: 1/g' coherence-operator.yaml ", + "text": " If you want the default Coherence Operator installation then the simplest solution is to use kubectl to apply the manifests from the Operator release. kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file. Because the coherence-operator.yaml manifest also creates the namespace, the corresponding kubectl delete command will remove the namespace and everything deployed to it ! If you do not want this behaviour you should edit the coherence-operator.yaml to remove the namespace section from the start of the file. Instead of using a hard coded version in the command above you can find the latest Operator version using curl : export VERSION=$(curl -s \\ https://api.github.com/repos/oracle/coherence-operator/releases/latest \\ | grep '\"name\": \"v' \\ | cut -d '\"' -f 4 \\ | cut -b 2-10) Then download with: kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/${VERSION}/coherence-operator.yaml Change the Operator Replica Count When installing with the single manifest yaml file, the replica count can be changed by editing the yaml file itself to change the occurrence of replicas: 3 in the manifest yaml to replicas: 1 For example, this could be done using sed sed -i -e 's/replicas: 3/replicas: 1/g' coherence-operator.yaml Or on MacOS, where sed is slightly different: sed -i '' -e 's/replicas: 3/replicas: 1/g' coherence-operator.yaml ", "title": "Default Install with Kubectl" }, { @@ -62,7 +62,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.0 . If the image needs to be pulled from a different location (for example an internal registry) then there are two ways to override the default. Either set the individual image.registry , image.name and image.tag values, or set the whole image name by setting the image value. For example, if the Operator image has been deployed into a private registry named foo.com but with the same image name coherence-operator and tag 3.4.0 as the default image, then just the image.registry needs to be specified. In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.0 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ coherence-operator \\ coherence/coherence-operator All three of the image parts can be specified individually using --set options. 
In the example below, the image used to run the Operator will be foo.com/operator:1.2.3 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ --set image.name=operator \\ --set image.tag=1.2.3 coherence-operator \\ coherence/coherence-operator Alternatively, the image can be set using a single image value. For example, the command below will set the Operator image to images.com/coherence-operator:0.1.2 . helm install \\ --namespace <namespace> \\ --set image=images.com/coherence-operator:0.1.2 \\ coherence-operator \\ coherence/coherence-operator ", + "text": " The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.1 . If the image needs to be pulled from a different location (for example an internal registry) then there are two ways to override the default. Either set the individual image.registry , image.name and image.tag values, or set the whole image name by setting the image value. For example, if the Operator image has been deployed into a private registry named foo.com but with the same image name coherence-operator and tag 3.4.1 as the default image, then just the image.registry needs to be specified. In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.1 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ coherence-operator \\ coherence/coherence-operator All three of the image parts can be specified individually using --set options. In the example below, the image used to run the Operator will be foo.com/operator:1.2.3 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ --set image.name=operator \\ --set image.tag=1.2.3 coherence-operator \\ coherence/coherence-operator Alternatively, the image can be set using a single image value. For example, the command below will set the Operator image to images.com/coherence-operator:0.1.2 . helm install \\ --namespace <namespace> \\ --set image=images.com/coherence-operator:0.1.2 \\ coherence-operator \\ coherence/coherence-operator ", "title": "Set the Operator Image" }, { @@ -142,7 +142,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " For more flexibility but the simplest way to install the Coherence Operator is to use the Helm chart. This ensures that all the correct resources will be created in Kubernetes. Add the Coherence Helm Repository Add the coherence helm repository using the following commands: helm repo add coherence https://oracle.github.io/coherence-operator/charts helm repo update To avoid confusion, the URL https://oracle.github.io/coherence-operator/charts is a Helm repo, it is not a website you open in a browser. You may think we shouldn’t have to say this, but you’d be surprised. Install the Coherence Operator Helm chart Once the Coherence Helm repo has been configured the Coherence Operator can be installed using a normal Helm 3 install command: helm install \\ --namespace <namespace> \\ coherence \\ coherence/coherence-operator where <namespace> is the namespace that the Coherence Operator will be installed into. coherence is the name of this Helm installation. Set the Operator Image The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.0 . If the image needs to be pulled from a different location (for example an internal registry) then there are two ways to override the default. Either set the individual image.registry , image.name and image.tag values, or set the whole image name by setting the image value. 
For example, if the Operator image has been deployed into a private registry named foo.com but with the same image name coherence-operator and tag 3.4.0 as the default image, then just the image.registry needs to be specified. In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.0 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ coherence-operator \\ coherence/coherence-operator All three of the image parts can be specified individually using --set options. In the example below, the image used to run the Operator will be foo.com/operator:1.2.3 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ --set image.name=operator \\ --set image.tag=1.2.3 coherence-operator \\ coherence/coherence-operator Alternatively, the image can be set using a single image value. For example, the command below will set the Operator image to images.com/coherence-operator:0.1.2 . helm install \\ --namespace <namespace> \\ --set image=images.com/coherence-operator:0.1.2 \\ coherence-operator \\ coherence/coherence-operator Image Pull Secrets If the image is to be pulled from a secure repository that requires credentials then the image pull secrets can be specified. See the Kubernetes documentation on Pulling from a Private Registry . Add Pull Secrets Using a Values File Create a values file that specifies the secrets, for example the private-repo-values.yaml file below: imagePullSecrets: - name: registry-secrets Now use that file in the Helm install command: helm install \\ --namespace <namespace> \\ -f private-repo-values.yaml coherence-operator \\ coherence/coherence-operator the private-repo-values.yaml values fle will be used by Helm to inject the settings into the Operator deployment Add Pull Secrets Using --set Although the imagePullSecrets field in the values file is an array of name to value pairs it is possible to set these values with the normal Helm --set parameter. helm install \\ --namespace <namespace> \\ --set imagePullSecrets[0].name=registry-secrets coherence-operator \\ coherence/coherence-operator this creates the same imagePullSecrets as the values file above. Change the Operator Replica Count To change the replica count when installing the Operator using Helm, the replicas value can be set. For example, to change the replica count from 3 to 1, the --set replicas=1 option can be used. helm install \\ --namespace <namespace> \\ --set replicas=1 coherence \\ coherence/coherence-operator Set the Watch Namespaces To set the watch namespaces when installing with helm set the watchNamespaces value, for example: helm install \\ --namespace <namespace> \\ --set watchNamespaces=payments,catalog,customers \\ coherence-operator \\ coherence/coherence-operator The payments , catalog and customers namespaces will be watched by the Operator. Set the Watch Namespace to the Operator’s Install Namespace When installing the Operator using the Helm chart, there is a convenience value that can be set if the Operator should only monitor the same namespace that it is installed into. By setting the onlySameNamespace value to true the watch namespace will be set to the installation namespace. If the onlySameNamespace value is set to true then any value set for the watchNamespaces value will be ignored. For example, the command below will set onlySameNamespace to true, and the Operator will be installed into, and only monitor the coh-testing namespace. 
helm install \\ --namespace coh-testing \\ --set onlySameNamespace=true \\ coherence-operator \\ coherence/coherence-operator In the example below, the onlySameNamespace is set to true, so the Operator will be installed into, and only monitor the coh-testing namespace. Even though the watchNamespaces value is set, it will be ignored. helm install \\ --namespace coh-testing \\ --set watchNamespaces=payments,catalog,customers \\ --set onlySameNamespace=true \\ coherence-operator \\ coherence/coherence-operator Install the Operator with a Security Context The Operator container can be configured with a Pod securityContext or a container securityContext , so that it runs as a non-root user. This can be done using a values file: Set the Pod securityContext podSecurityContext: runAsNonRoot: true runAsUser: 1000 Set the Container securityContext securityContext: runAsNonRoot: true runAsUser: 1000 Then the security-values.yaml values file above can be used in the Helm install command. helm install \\ --namespace <namespace> \\ --values security-values.yaml \\ coherence \\ coherence/coherence-operator Alternatively, the Pod or container securityContext values can be set on the command line as --set parameters: Set the Pod securityContext helm install \\ --namespace <namespace> \\ --set podSecurityContext.runAsNonRoot=true \\ --set podSecurityContext.runAsUser=1000 \\ coherence \\ coherence/coherence-operator Set the Container securityContext helm install \\ --namespace <namespace> \\ --set securityContext.runAsNonRoot=true \\ --set securityContext.runAsUser=1000 \\ coherence \\ coherence/coherence-operator Set Additional Labels When installing the Operator with Helm, it is possible to set additional labels to be applied to the Operator Pods and to the Operator Deployment. Adding Pod Labels To add labels to the Operator Pods set the labels value, either on the command line using --set or in the values file. Note Setting labels will only apply the additional labels to the Operator Pods, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set labels.one=value-one \\ --set labels.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional labels one and two to the Operator Pod as shown below: apiVersion: v1 kind: Pod metadata: name: coherence-operator labels: one: value-one two: value-two The same labels could also be specified in a values file: labels: one: value-one two: value-two Adding Deployment Labels To add labels to the Operator Deployment set the deploymentLabels value, either on the command line using --set or in the values file. Note Setting deploymentLabels will only apply the additional labels to the Deployment, they will not be applied to any other resource created by the Helm chart. 
For example, using the command line: helm install \\ --namespace <namespace> \\ --set deploymentLabels.one=value-one \\ --set deploymentLabels.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional labels one and two to the Operator Pod as shown below: apiVersion: apps/v1 kind: Deployment metadata: name: coherence-operator labels: one: value-one two: value-two The same labels could also be specified in a values file: deploymentLabels: one: value-one two: value-two Set Additional Annotations When installing the Operator with Helm, it is possible to set additional annotations to be applied to the Operator Pods and to the Operator Deployment. Adding Pod Annotations To add annotations to the Operator Pods set the annotations value, either on the command line using --set or in the values file. Note Setting annotations will only apply the additional annotations to the Operator Pods, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set annotations.one=value-one \\ --set annotations.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional annotations one and two to the Operator Pod as shown below: apiVersion: v1 kind: Pod metadata: name: coherence-operator annotations: one: value-one two: value-two The same annotations could also be specified in a values file: annotations: one: value-one two: value-two Adding Deployment Annotations To add annotations to the Operator Deployment set the deploymentAnnotations value, either on the command line using --set or in the values file. Note Setting deploymentAnnotations will only apply the additional annotations to the Deployment, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set deploymentAnnotations.one=value-one \\ --set deploymentAnnotations.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional annotations one and two to the Operator Pod as shown below: apiVersion: apps/v1 kind: Deployment metadata: name: coherence-operator annotations: one: value-one two: value-two The same annotations could also be specified in a values file: deploymentAnnotations: one: value-one two: value-two CoherenceJob CRD Support By default, the Operator will install both CRDs, Coherence and CoherenceJob . If support for CoherenceJob is not required then it can be excluded from being installed setting the Operator command line parameter --install-job-crd to false . When installing with Helm, the allowCoherenceJobs value can be set to false to disable support for CoherenceJob resources (the default value is true ). helm install \\ --namespace <namespace> \\ --set allowCoherenceJobs=false \\ coherence \\ coherence/coherence-operator Uninstall the Coherence Operator Helm chart To uninstall the operator: helm delete coherence-operator --namespace <namespace> ", + "text": " For more flexibility but the simplest way to install the Coherence Operator is to use the Helm chart. This ensures that all the correct resources will be created in Kubernetes. 
Add the Coherence Helm Repository Add the coherence helm repository using the following commands: helm repo add coherence https://oracle.github.io/coherence-operator/charts helm repo update To avoid confusion, the URL https://oracle.github.io/coherence-operator/charts is a Helm repo, it is not a website you open in a browser. You may think we shouldn’t have to say this, but you’d be surprised. Install the Coherence Operator Helm chart Once the Coherence Helm repo has been configured the Coherence Operator can be installed using a normal Helm 3 install command: helm install \\ --namespace <namespace> \\ coherence \\ coherence/coherence-operator where <namespace> is the namespace that the Coherence Operator will be installed into. coherence is the name of this Helm installation. Set the Operator Image The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.1 . If the image needs to be pulled from a different location (for example an internal registry) then there are two ways to override the default. Either set the individual image.registry , image.name and image.tag values, or set the whole image name by setting the image value. For example, if the Operator image has been deployed into a private registry named foo.com but with the same image name coherence-operator and tag 3.4.1 as the default image, then just the image.registry needs to be specified. In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.1 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ coherence-operator \\ coherence/coherence-operator All three of the image parts can be specified individually using --set options. In the example below, the image used to run the Operator will be foo.com/operator:1.2.3 . helm install \\ --namespace <namespace> \\ --set image.registry=foo.com \\ --set image.name=operator \\ --set image.tag=1.2.3 coherence-operator \\ coherence/coherence-operator Alternatively, the image can be set using a single image value. For example, the command below will set the Operator image to images.com/coherence-operator:0.1.2 . helm install \\ --namespace <namespace> \\ --set image=images.com/coherence-operator:0.1.2 \\ coherence-operator \\ coherence/coherence-operator Image Pull Secrets If the image is to be pulled from a secure repository that requires credentials then the image pull secrets can be specified. See the Kubernetes documentation on Pulling from a Private Registry . Add Pull Secrets Using a Values File Create a values file that specifies the secrets, for example the private-repo-values.yaml file below: imagePullSecrets: - name: registry-secrets Now use that file in the Helm install command: helm install \\ --namespace <namespace> \\ -f private-repo-values.yaml coherence-operator \\ coherence/coherence-operator the private-repo-values.yaml values file will be used by Helm to inject the settings into the Operator deployment Add Pull Secrets Using --set Although the imagePullSecrets field in the values file is an array of name to value pairs, it is possible to set these values with the normal Helm --set parameter. helm install \\ --namespace <namespace> \\ --set imagePullSecrets[0].name=registry-secrets coherence-operator \\ coherence/coherence-operator this creates the same imagePullSecrets as the values file above. Change the Operator Replica Count To change the replica count when installing the Operator using Helm, the replicas value can be set. 
For example, to change the replica count from 3 to 1, the --set replicas=1 option can be used. helm install \\ --namespace <namespace> \\ --set replicas=1 coherence \\ coherence/coherence-operator Set the Watch Namespaces To set the watch namespaces when installing with helm set the watchNamespaces value, for example: helm install \\ --namespace <namespace> \\ --set watchNamespaces=payments,catalog,customers \\ coherence-operator \\ coherence/coherence-operator The payments , catalog and customers namespaces will be watched by the Operator. Set the Watch Namespace to the Operator’s Install Namespace When installing the Operator using the Helm chart, there is a convenience value that can be set if the Operator should only monitor the same namespace that it is installed into. By setting the onlySameNamespace value to true the watch namespace will be set to the installation namespace. If the onlySameNamespace value is set to true then any value set for the watchNamespaces value will be ignored. For example, the command below will set onlySameNamespace to true, and the Operator will be installed into, and only monitor the coh-testing namespace. helm install \\ --namespace coh-testing \\ --set onlySameNamespace=true \\ coherence-operator \\ coherence/coherence-operator In the example below, the onlySameNamespace is set to true, so the Operator will be installed into, and only monitor the coh-testing namespace. Even though the watchNamespaces value is set, it will be ignored. helm install \\ --namespace coh-testing \\ --set watchNamespaces=payments,catalog,customers \\ --set onlySameNamespace=true \\ coherence-operator \\ coherence/coherence-operator Install the Operator with a Security Context The Operator container can be configured with a Pod securityContext or a container securityContext , so that it runs as a non-root user. This can be done using a values file: Set the Pod securityContext podSecurityContext: runAsNonRoot: true runAsUser: 1000 Set the Container securityContext securityContext: runAsNonRoot: true runAsUser: 1000 Then the security-values.yaml values file above can be used in the Helm install command. helm install \\ --namespace <namespace> \\ --values security-values.yaml \\ coherence \\ coherence/coherence-operator Alternatively, the Pod or container securityContext values can be set on the command line as --set parameters: Set the Pod securityContext helm install \\ --namespace <namespace> \\ --set podSecurityContext.runAsNonRoot=true \\ --set podSecurityContext.runAsUser=1000 \\ coherence \\ coherence/coherence-operator Set the Container securityContext helm install \\ --namespace <namespace> \\ --set securityContext.runAsNonRoot=true \\ --set securityContext.runAsUser=1000 \\ coherence \\ coherence/coherence-operator Set Additional Labels When installing the Operator with Helm, it is possible to set additional labels to be applied to the Operator Pods and to the Operator Deployment. Adding Pod Labels To add labels to the Operator Pods set the labels value, either on the command line using --set or in the values file. Note Setting labels will only apply the additional labels to the Operator Pods, they will not be applied to any other resource created by the Helm chart. 
For example, using the command line: helm install \\ --namespace <namespace> \\ --set labels.one=value-one \\ --set labels.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional labels one and two to the Operator Pod as shown below: apiVersion: v1 kind: Pod metadata: name: coherence-operator labels: one: value-one two: value-two The same labels could also be specified in a values file: labels: one: value-one two: value-two Adding Deployment Labels To add labels to the Operator Deployment set the deploymentLabels value, either on the command line using --set or in the values file. Note Setting deploymentLabels will only apply the additional labels to the Deployment, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set deploymentLabels.one=value-one \\ --set deploymentLabels.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional labels one and two to the Operator Pod as shown below: apiVersion: apps/v1 kind: Deployment metadata: name: coherence-operator labels: one: value-one two: value-two The same labels could also be specified in a values file: deploymentLabels: one: value-one two: value-two Set Additional Annotations When installing the Operator with Helm, it is possible to set additional annotations to be applied to the Operator Pods and to the Operator Deployment. Adding Pod Annotations To add annotations to the Operator Pods set the annotations value, either on the command line using --set or in the values file. Note Setting annotations will only apply the additional annotations to the Operator Pods, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set annotations.one=value-one \\ --set annotations.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional annotations one and two to the Operator Pod as shown below: apiVersion: v1 kind: Pod metadata: name: coherence-operator annotations: one: value-one two: value-two The same annotations could also be specified in a values file: annotations: one: value-one two: value-two Adding Deployment Annotations To add annotations to the Operator Deployment set the deploymentAnnotations value, either on the command line using --set or in the values file. Note Setting deploymentAnnotations will only apply the additional annotations to the Deployment, they will not be applied to any other resource created by the Helm chart. For example, using the command line: helm install \\ --namespace <namespace> \\ --set deploymentAnnotations.one=value-one \\ --set deploymentAnnotations.two=value-two \\ coherence \\ coherence/coherence-operator The command above would add the following additional annotations one and two to the Operator Pod as shown below: apiVersion: apps/v1 kind: Deployment metadata: name: coherence-operator annotations: one: value-one two: value-two The same annotations could also be specified in a values file: deploymentAnnotations: one: value-one two: value-two CoherenceJob CRD Support By default, the Operator will install both CRDs, Coherence and CoherenceJob . If support for CoherenceJob is not required then it can be excluded from being installed setting the Operator command line parameter --install-job-crd to false . 
When installing with Helm, the allowCoherenceJobs value can be set to false to disable support for CoherenceJob resources (the default value is true ). helm install \\ --namespace <namespace> \\ --set allowCoherenceJobs=false \\ coherence \\ coherence/coherence-operator Uninstall the Coherence Operator Helm chart To uninstall the operator: helm delete coherence-operator --namespace <namespace> ", "title": "Installing With Helm" }, { @@ -187,7 +187,7 @@ }, { "location": "/docs/installation/01_installation", - "text": " If using VMWare Tanzu the Coherence Operator can be installed as a package. Under the covers, Tanzu uses the Carvel tool set to deploy packages. The Carvel tools can be used outside Tanzu, so the Coherence Operator repo and package images could also be deployed using a standalone Carvel kapp-controller . The Coherence Operator release published two images required to deploy the Operator as a Tanzu package. ghcr.io/oracle/coherence-operator-package:3.4.0 - the Coherence Operator package ghcr.io/oracle/coherence-operator-repo:3.4.0 - the Coherence Operator repository Install the Coherence Repository The first step to deploy the Coherence Operator package in Tanzu is to add the repository. This can be done using the Tanzu CLI. tanzu package repository add coherence-repo \\ --url ghcr.io/oracle/coherence-operator-repo:3.3.5 \\ --namespace coherence \\ --create-namespace The installed repositories can be listed using the CLI: tanzu package repository list --namespace coherence which should display something like the following NAME REPOSITORY TAG STATUS DETAILS coherence-repo ghcr.io/oracle/coherence-operator-repo 1h Reconcile succeeded The available packages in the Coherence repository can also be displayed using the CLI tanzu package available list --namespace coherence which should include the Operator package, coherence-operator.oracle.github.com something like the following NAME DISPLAY-NAME SHORT-DESCRIPTION LATEST-VERSION coherence-operator.oracle.github.com Oracle Coherence Operator A Kubernetes operator for managing Oracle Coherence clusters 3.3.5 Install the Coherence Operator Package Once the Coherence Operator repository has been installed, the coherence-operator.oracle.github.com package can be installed, which will install the Coherence Operator itself. tanzu package install coherence \\ --package-name coherence-operator.oracle.github.com \\ --version 3.3.5 \\ --namespace coherence The Tanzu CLI will display the various steps it is going through to install the package and if all goes well, finally display Added installed package 'coherence' The packages installed in the coherence namespace can be displayed using the CLI. tanzu package installed list --namespace coherence which should display the Coherence Operator package. NAME PACKAGE-NAME PACKAGE-VERSION STATUS coherence coherence-operator.oracle.github.com 3.3.5 Reconcile succeeded The Operator is now installed and ready to mage Coherence clusters. ", + "text": " If using VMWare Tanzu the Coherence Operator can be installed as a package. Under the covers, Tanzu uses the Carvel tool set to deploy packages. The Carvel tools can be used outside Tanzu, so the Coherence Operator repo and package images could also be deployed using a standalone Carvel kapp-controller . The Coherence Operator release published two images required to deploy the Operator as a Tanzu package. 
ghcr.io/oracle/coherence-operator-package:3.4.1 - the Coherence Operator package ghcr.io/oracle/coherence-operator-repo:3.4.1 - the Coherence Operator repository Install the Coherence Repository The first step to deploy the Coherence Operator package in Tanzu is to add the repository. This can be done using the Tanzu CLI. tanzu package repository add coherence-repo \\ --url ghcr.io/oracle/coherence-operator-repo:3.3.5 \\ --namespace coherence \\ --create-namespace The installed repositories can be listed using the CLI: tanzu package repository list --namespace coherence which should display something like the following NAME REPOSITORY TAG STATUS DETAILS coherence-repo ghcr.io/oracle/coherence-operator-repo 1h Reconcile succeeded The available packages in the Coherence repository can also be displayed using the CLI tanzu package available list --namespace coherence which should include the Operator package, coherence-operator.oracle.github.com something like the following NAME DISPLAY-NAME SHORT-DESCRIPTION LATEST-VERSION coherence-operator.oracle.github.com Oracle Coherence Operator A Kubernetes operator for managing Oracle Coherence clusters 3.3.5 Install the Coherence Operator Package Once the Coherence Operator repository has been installed, the coherence-operator.oracle.github.com package can be installed, which will install the Coherence Operator itself. tanzu package install coherence \\ --package-name coherence-operator.oracle.github.com \\ --version 3.3.5 \\ --namespace coherence The Tanzu CLI will display the various steps it is going through to install the package and if all goes well, finally display Added installed package 'coherence' The packages installed in the coherence namespace can be displayed using the CLI. tanzu package installed list --namespace coherence which should display the Coherence Operator package. NAME PACKAGE-NAME PACKAGE-VERSION STATUS coherence coherence-operator.oracle.github.com 3.3.5 Reconcile succeeded The Operator is now installed and ready to manage Coherence clusters. ", "title": "Install as a VMWare Tanzu Package (Carvel kapp-controller)" }, { @@ -352,7 +352,7 @@ }, { "location": "/docs/other/041_global_labels", - "text": " When installing the Operator using the manifest yaml files, additional command line flags can be configured by manually editing the yaml file before installing. Download the yaml manifest file from the GitHub repo https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml Find the section of the yaml file the defines the Operator container args, the default looks like this - args: - operator - --enable-leader-election Then edit the argument list to add the required --global-label and --global-annotation flags. For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, the file would look like this: - args: - operator - --enable-leader-election - --global-label - one=label-one - --global-annotation - foo=bar - --global-label - two=label-two` Important Container arguments must each be a separate entry in the arg list. This is valid - args: - operator - --enable-leader-election - --global-label - one=label-one This is not valid - args: - operator - --enable-leader-election - --global-label one=label-one ", + "text": " When installing the Operator using the manifest yaml files, additional command line flags can be configured by manually editing the yaml file before installing. 
Download the yaml manifest file from the GitHub repo https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml Find the section of the yaml file that defines the Operator container args, the default looks like this - args: - operator - --enable-leader-election Then edit the argument list to add the required --global-label and --global-annotation flags. For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, the file would look like this: - args: - operator - --enable-leader-election - --global-label - one=label-one - --global-annotation - foo=bar - --global-label - two=label-two Important Container arguments must each be a separate entry in the arg list. This is valid - args: - operator - --enable-leader-election - --global-label - one=label-one This is not valid - args: - operator - --enable-leader-election - --global-label one=label-one ", "title": "Installing Using the Manifest Files" }, { @@ -362,7 +362,7 @@ }, { "location": "/docs/other/041_global_labels", - "text": " The Operator runner binary has various command line flags that can be specified on its command line. Two of these flags when starting the Operator are: --global-label to specify a global label key and value --global-annotation to specify a global annotation key and value Both of these command line flags can be specified multiple times if required. For example: runner operator --global-label one=label-one --global-annoataion foo=bar --global-label two=label-two The command above will start the Operator with two global labels, one=label-one and two=labl-two and with one global annotation foo=bar . The Operator will then apply these labels and annotations to every Kubernetes resource that it creates. Installing Using the Manifest Files When installing the Operator using the manifest yaml files, additional command line flags can be configured by manually editing the yaml file before installing. Download the yaml manifest file from the GitHub repo https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml Find the section of the yaml file the defines the Operator container args, the default looks like this - args: - operator - --enable-leader-election Then edit the argument list to add the required --global-label and --global-annotation flags. For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, the file would look like this: - args: - operator - --enable-leader-election - --global-label - one=label-one - --global-annotation - foo=bar - --global-label - two=label-two` Important Container arguments must each be a separate entry in the arg list. This is valid - args: - operator - --enable-leader-election - --global-label - one=label-one This is not valid - args: - operator - --enable-leader-election - --global-label one=label-one Installing Using the Helm Chart If installing the Operator using the Helm chart, the global labels and annotations can be specified as values as part of the Helm command or in a values file. 
For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, create a simple values file: globalLabels: one: \"label-one\" two: \"label-two\" globalAnnotations: foo: \"bar\" Use the values file when installing the Helm chart helm install \\ --namespace <namespace> \\ --values global-values.yaml coherence \\ coherence/coherence-operator Note When setting the Helm chart values globalLabels or globalAnnotations any labels and annotations specified will also be applied to all the resources installed by the Helm Chart too. ", + "text": " The Operator runner binary has various command line flags that can be specified on its command line. Two of these flags when starting the Operator are: --global-label to specify a global label key and value --global-annotation to specify a global annotation key and value Both of these command line flags can be specified multiple times if required. For example: runner operator --global-label one=label-one --global-annotation foo=bar --global-label two=label-two The command above will start the Operator with two global labels, one=label-one and two=label-two and with one global annotation foo=bar . The Operator will then apply these labels and annotations to every Kubernetes resource that it creates. Installing Using the Manifest Files When installing the Operator using the manifest yaml files, additional command line flags can be configured by manually editing the yaml file before installing. Download the yaml manifest file from the GitHub repo https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml Find the section of the yaml file that defines the Operator container args, the default looks like this - args: - operator - --enable-leader-election Then edit the argument list to add the required --global-label and --global-annotation flags. For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, the file would look like this: - args: - operator - --enable-leader-election - --global-label - one=label-one - --global-annotation - foo=bar - --global-label - two=label-two Important Container arguments must each be a separate entry in the arg list. This is valid - args: - operator - --enable-leader-election - --global-label - one=label-one This is not valid - args: - operator - --enable-leader-election - --global-label one=label-one Installing Using the Helm Chart If installing the Operator using the Helm chart, the global labels and annotations can be specified as values as part of the Helm command or in a values file. For example, to add the same --global-label one=label-one --global-annotation foo=bar --global-label two=label-two flags, create a simple values file: globalLabels: one: \"label-one\" two: \"label-two\" globalAnnotations: foo: \"bar\" Use the values file when installing the Helm chart helm install \\ --namespace <namespace> \\ --values global-values.yaml coherence \\ coherence/coherence-operator Note When setting the Helm chart values globalLabels or globalAnnotations any labels and annotations specified will also be applied to all the resources installed by the Helm Chart too. ", "title": "Specify Global Labels and Annotations when Installing the Operator" }, { @@ -522,7 +522,7 @@ }, { "location": "/examples/095_network_policies/README", - "text": " The test client is able to test connectivity to any host and port. 
For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.0 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml ", + "text": " The test client is able to test connectivity to any host and port. For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. 
In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.1 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml ", "title": "Testing Ad-Hoc Ports" }, { @@ -537,12 +537,12 @@ }, { "location": "/examples/095_network_policies/README", - "text": " At the time of writing this documentation, Kubernetes provides no way to verify the correctness of network policies. It is easy to mess up a policy, in which case policies will either block too much traffic, in which case your application will work, or worse they will not be blocking access and leave a security hole. As we have had various requests for help from customers who cannot get Coherence to work with network policies enabled, the Operator has a simple utility to test connectivity outside of Coherence. This will allow testing pf policies without the complications of having to start a Coherence server. This example includes some simple yaml files that will create simulator Pods that listen on all the ports used by the Operator and by a Coherence cluster member. These simulator Pods are configured with the same labels that the real Operator and Coherence Pods would have and the same labels used by the network policies in this example. 
Also included are some yaml files that start a test client, that simulates either the Operator connecting to Coherence Pods or a Coherence Pod connecting to the Operator and to other Coherence Pods. To run these tests, the Operator does not have to be installed. Create the Test Namespaces In this example we will assume the Operator will eventually be running in a namespace called coherence and the Coherence cluster will run in a namespace called coh-test . We can create the namespaces using kubectl kubectl create ns coherence kubectl create ns coh-test At this point there are no network policies installed, this will allow us to confirm the connectivity tests work. Start the Operator Simulator The Operator simulator server should run in the coherence namespace. It can be created using the following command: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator-server.yaml Start the Coherence Cluster Simulator The Coherence cluster member simulator server should run in the coh-test namespace. It can be created using the following command: kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence-server.yaml Run the Operator Test We can now run the Operator test Job. This wil run a Kubernetes Job that simulates the Operator connecting to the Kubernetes API server and to the Operator Pods. kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml The test Job should complete very quickly as it is only testing connectivity to various ports. The results of the test can be seen by looking at the Pod log. The command below will display the log: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) The output from a successful test will look like this: 1.6727606592497227e+09 INFO runner Operator Version: 3.3.2 1.6727606592497835e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727606592500978e+09 INFO runner Operator Built By: jonathanknight 1.6727606592501197e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727606592501485e+09 INFO runner Go Version: go1.19.2 1.6727606592501757e+09 INFO runner Go OS/Arch: linux/amd64 1.6727606592504115e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727606592504556e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727606592664087e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727606592674055e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727606592770455e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} We can see that the test has connected to the Kubernetes API server and has connected to the health port on the Coherence cluster test server in the coh-test namespace. The test Job can then be deleted: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml Run the Cluster Member Test The cluster member test simulates a Coherence cluster member connecting to other cluster members in the same namespace and also making calls to the Operator’s REST endpoint. kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence.yaml Again, the test should complete quickly as it is just connecting to various ports. 
The results of the test can be seen by looking at the Pod log. The command below will display the log: kubectl -n coh-test logs $(kubectl -n coh-test get pod -l 'coherenceNetTest=coherence-client' -o name) The output from a successful test will look like this: 1.6727631152848177e+09 INFO runner Operator Version: 3.3.2 1.6727631152849226e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727631152849536e+09 INFO runner Operator Built By: jonathanknight 1.6727631152849755e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727631152849965e+09 INFO runner Go Version: go1.19.2 1.6727631152850187e+09 INFO runner Go OS/Arch: linux/amd64 1.6727631152852216e+09 INFO net-test Starting test {\"Name\": \"Cluster Member Simulator\"} 1.6727631152852666e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152997334e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152998908e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153059115e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153063197e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153116117e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153119817e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153187876e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153189638e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153265746e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153267298e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153340726e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153342876e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} 1.6727631153406997e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} The test client successfully connected to the Coherence cluster port (7475), the two unicast ports (7575 and 7576), the Coherence management port (30000), the Coherence metrics port (9612), the Operator REST port (8000), and the echo port (7). 
The test Job can then be deleted: kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence.yaml Testing the Operator Web Hook The Operator has a web-hook that k8s calls to validate Coherence resource configurations and to provide default values. Web hooks in Kubernetes use TLS by default and listen on port 443. The Operator server simulator also listens on port 443 to allow this connectivity to be tested. The network policy in this example that allows ingress to the web-hook allows any client to connect. This is because it is not always simple to work out the IP address that the API server will connect to the web-hook from. We can use the network tester to simulate this by running a Job that will connect to the web hook port. The web-hook test job in this example does not label the Pod and can be run from the default namespace to simulate a random external connection. kubectl -n default apply -f examples/095_network_policies/manifests/net-test-webhook.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n default logs $(kubectl -n default get pod -l 'coherenceNetTest=webhook-client' -o name) The output from a successful test will look like this: 1.6727639834559627e+09 INFO runner Operator Version: 3.3.2 1.6727639834562948e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727639834563956e+09 INFO runner Operator Built By: jonathanknight 1.6727639834565024e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727639834566057e+09 INFO runner Go Version: go1.19.2 1.6727639834567096e+09 INFO runner Go OS/Arch: linux/amd64 1.6727639834570327e+09 INFO net-test Starting test {\"Name\": \"Web-Hook Client\"} 1.6727639834571698e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} 1.6727639834791095e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} We can see that the client successfully connected to port 443. The test Job can then be deleted: kubectl -n default delete -f examples/095_network_policies/manifests/net-test-webhook.yaml Testing Ad-Hoc Ports The test client is able to test connectivity to any host and port. For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . 
apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.0 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml Test with Network Policies All the above tests ran successfully without any network policies. We can now start to apply policies and re-run the tests to see what happens. In a secure environment we would start with a policy that blocks all access and then gradually open up required ports. We can apply the deny-all.yaml policy and then re-run the tests. 
We should apply the policy to both of the namespaces we are using in this example: kubectl -n coherence apply -f examples/095_network_policies/manifests/deny-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/deny-all.yaml Now, re-run the Operator test client: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) 1.6727671834237397e+09 INFO runner Operator Version: 3.3.2 1.6727671834238796e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727671834239576e+09 INFO runner Operator Built By: jonathanknight 1.6727671834240365e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727671834240875e+09 INFO runner Go Version: go1.19.2 1.6727671834241736e+09 INFO runner Go OS/Arch: linux/amd64 1.6727671834244306e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727671834245417e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727672134268515e+09 INFO net-test Testing connectivity FAILED {\"PortName\": \"K8s API Server\", \"Error\": \"Get \\\"https://10.96.0.1:443/version?timeout=32s\\\": dial tcp 10.96.0.1:443: i/o timeout\"} 1.6727672134269848e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727672234281697e+09 INFO net-test Testing connectivity FAILED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676, \"Error\": \"dial tcp: lookup net-test-coherence-server.coh-test.svc: i/o timeout\"} We can see that the test client failed to connect to the Kubernetes API server and failed to connect to the Coherence cluster health port. This means the deny-all policy is working. We can now apply the various polices to fix the test kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-k8s-api-server.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-cluster-member-egress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-webhook-ingress-from-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-operator-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-metrics-ingress.yaml Now, delete and re-run the Operator test client: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) Now with the policies applied the test should have passed. 
1.6727691273634596e+09 INFO runner Operator Version: 3.3.2 1.6727691273635025e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727691273635256e+09 INFO runner Operator Built By: jonathanknight 1.6727691273635616e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727691273637156e+09 INFO runner Go Version: go1.19.2 1.6727691273637407e+09 INFO runner Go OS/Arch: linux/amd64 1.6727691273639407e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727691273639877e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727691273857167e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727691273858056e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727691273933685e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} The other tests can also be re-run and should also pass. Clean-Up Once the tests are completed, the test servers and Jobs can be deleted. kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator-server.yaml kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence-server.yaml ", + "text": " At the time of writing this documentation, Kubernetes provides no way to verify the correctness of network policies. It is easy to get a policy wrong, in which case it will either block too much traffic, so your application will not work, or, worse, it will fail to block access that it should, leaving an invisible security hole. As we have had various requests for help from customers who cannot get Coherence to work with network policies enabled, the Operator has a simple utility to test connectivity outside of Coherence. This will allow testing of policies without the complications of having to start a Coherence server. This example includes some simple yaml files that will create simulator Pods that listen on all the ports used by the Operator and by a Coherence cluster member. These simulator Pods are configured with the same labels that the real Operator and Coherence Pods would have and the same labels used by the network policies in this example. Also included are some yaml files that start a test client that simulates either the Operator connecting to Coherence Pods or a Coherence Pod connecting to the Operator and to other Coherence Pods. To run these tests, the Operator does not have to be installed. Create the Test Namespaces In this example we will assume the Operator will eventually be running in a namespace called coherence and the Coherence cluster will run in a namespace called coh-test . We can create the namespaces using kubectl kubectl create ns coherence kubectl create ns coh-test At this point there are no network policies installed, which will allow us to confirm that the connectivity tests work. Start the Operator Simulator The Operator simulator server should run in the coherence namespace. It can be created using the following command: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator-server.yaml Start the Coherence Cluster Simulator The Coherence cluster member simulator server should run in the coh-test namespace.
It can be created using the following command: kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence-server.yaml Run the Operator Test We can now run the Operator test Job. This will run a Kubernetes Job that simulates the Operator connecting to the Kubernetes API server and to the Coherence cluster Pods. kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml The test Job should complete very quickly as it is only testing connectivity to various ports. The results of the test can be seen by looking at the Pod log. The command below will display the log: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) The output from a successful test will look like this: 1.6727606592497227e+09 INFO runner Operator Version: 3.3.2 1.6727606592497835e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727606592500978e+09 INFO runner Operator Built By: jonathanknight 1.6727606592501197e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727606592501485e+09 INFO runner Go Version: go1.19.2 1.6727606592501757e+09 INFO runner Go OS/Arch: linux/amd64 1.6727606592504115e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727606592504556e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727606592664087e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727606592674055e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727606592770455e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} We can see that the test has connected to the Kubernetes API server and has connected to the health port on the Coherence cluster test server in the coh-test namespace. The test Job can then be deleted: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml Run the Cluster Member Test The cluster member test simulates a Coherence cluster member connecting to other cluster members in the same namespace and also making calls to the Operator’s REST endpoint. kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence.yaml Again, the test should complete quickly as it is just connecting to various ports. The results of the test can be seen by looking at the Pod log.
The command below will display the log: kubectl -n coh-test logs $(kubectl -n coh-test get pod -l 'coherenceNetTest=coherence-client' -o name) The output from a successful test will look like this: 1.6727631152848177e+09 INFO runner Operator Version: 3.3.2 1.6727631152849226e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727631152849536e+09 INFO runner Operator Built By: jonathanknight 1.6727631152849755e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727631152849965e+09 INFO runner Go Version: go1.19.2 1.6727631152850187e+09 INFO runner Go OS/Arch: linux/amd64 1.6727631152852216e+09 INFO net-test Starting test {\"Name\": \"Cluster Member Simulator\"} 1.6727631152852666e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152997334e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152998908e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153059115e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153063197e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153116117e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153119817e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153187876e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153189638e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153265746e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153267298e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153340726e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153342876e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} 1.6727631153406997e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} The test client successfully connected to the Coherence cluster port (7574), the two unicast ports (7575 and 7576), the Coherence management port (30000), the Coherence metrics port (9612), the Operator REST port (8000), and the echo port (7). The test Job can then be deleted: kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence.yaml Testing the Operator Web Hook The Operator has a web-hook that k8s calls to validate Coherence resource configurations and to provide default values.
Web hooks in Kubernetes use TLS by default and listen on port 443. The Operator server simulator also listens on port 443 to allow this connectivity to be tested. The network policy in this example that allows ingress to the web-hook allows any client to connect. This is because it is not always simple to work out the IP address that the API server will connect to the web-hook from. We can use the network tester to simulate this by running a Job that will connect to the web hook port. The web-hook test job in this example does not label the Pod and can be run from the default namespace to simulate a random external connection. kubectl -n default apply -f examples/095_network_policies/manifests/net-test-webhook.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n default logs $(kubectl -n default get pod -l 'coherenceNetTest=webhook-client' -o name) The output from a successful test will look like this: 1.6727639834559627e+09 INFO runner Operator Version: 3.3.2 1.6727639834562948e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727639834563956e+09 INFO runner Operator Built By: jonathanknight 1.6727639834565024e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727639834566057e+09 INFO runner Go Version: go1.19.2 1.6727639834567096e+09 INFO runner Go OS/Arch: linux/amd64 1.6727639834570327e+09 INFO net-test Starting test {\"Name\": \"Web-Hook Client\"} 1.6727639834571698e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} 1.6727639834791095e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} We can see that the client successfully connected to port 443. The test Job can then be deleted: kubectl -n default delete -f examples/095_network_policies/manifests/net-test-webhook.yaml Testing Ad-Hoc Ports The test client is able to test connectivity to any host and port. For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.1 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. 
kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml Test with Network Policies All the above tests ran successfully without any network policies. We can now start to apply policies and re-run the tests to see what happens. In a secure environment we would start with a policy that blocks all access and then gradually open up required ports. We can apply the deny-all.yaml policy and then re-run the tests. 
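For reference, the deny-all.yaml manifest applied in the next step is expected to be a standard deny-everything policy, equivalent to the deny-all example shown in the main network policies example: it selects every Pod and allows no ingress or egress.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all
spec:
  # An empty podSelector matches every Pod in the namespace the policy is applied to
  podSelector: {}
  # Declaring both policy types while defining no rules denies all ingress and egress
  policyTypes:
    - Ingress
    - Egress
  ingress: []
  egress: []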
We should apply the policy to both of the namespaces we are using in this example: kubectl -n coherence apply -f examples/095_network_policies/manifests/deny-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/deny-all.yaml Now, re-run the Operator test client: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) 1.6727671834237397e+09 INFO runner Operator Version: 3.3.2 1.6727671834238796e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727671834239576e+09 INFO runner Operator Built By: jonathanknight 1.6727671834240365e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727671834240875e+09 INFO runner Go Version: go1.19.2 1.6727671834241736e+09 INFO runner Go OS/Arch: linux/amd64 1.6727671834244306e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727671834245417e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727672134268515e+09 INFO net-test Testing connectivity FAILED {\"PortName\": \"K8s API Server\", \"Error\": \"Get \\\"https://10.96.0.1:443/version?timeout=32s\\\": dial tcp 10.96.0.1:443: i/o timeout\"} 1.6727672134269848e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727672234281697e+09 INFO net-test Testing connectivity FAILED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676, \"Error\": \"dial tcp: lookup net-test-coherence-server.coh-test.svc: i/o timeout\"} We can see that the test client failed to connect to the Kubernetes API server and failed to connect to the Coherence cluster health port. This means the deny-all policy is working. We can now apply the various policies to fix the test: kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-k8s-api-server.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-cluster-member-egress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-webhook-ingress-from-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-operator-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-metrics-ingress.yaml Now, delete and re-run the Operator test client: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) Now, with the policies applied, the test should pass.
1.6727691273634596e+09 INFO runner Operator Version: 3.3.2 1.6727691273635025e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727691273635256e+09 INFO runner Operator Built By: jonathanknight 1.6727691273635616e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727691273637156e+09 INFO runner Go Version: go1.19.2 1.6727691273637407e+09 INFO runner Go OS/Arch: linux/amd64 1.6727691273639407e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727691273639877e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727691273857167e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727691273858056e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727691273933685e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} The other tests can also be re-run and should also pass. Clean-Up Once the tests are completed, the test servers and Jobs can be deleted. kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator-server.yaml kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence-server.yaml ", "title": "Testing Network Policies" }, { "location": "/examples/095_network_policies/README", - "text": " This example covers running the Coherence Operator and Coherence clusters in Kubernetes with network policies. In Kubernetes, a Network Policy is an application-centric construct which allow you to specify how a pod is allowed to communicate with various network \"entities\" (we use the word \"entity\" here to avoid overloading the more common terms such as \"endpoints\" and \"services\", which have specific Kubernetes connotations) over the network. Note Network policies in Kubernetes are easy to get wrong if you are not careful. In this case a policy will either block traffic it should not, in which case your application will not work, or it will let traffic through it should block, which will be an invisible security hole. It is obviously important to test your policies, but Kubernetes offers next to zero visibility into what the policies are actually doing, as it is typically the network CNI extensions that are providing the policy implementation and each of these may work in a different way. Introduction Kubernetes network policies specify the access permissions for groups of pods, similar to security groups in the cloud are used to control access to VM instances and similar to firewalls. The default behaviour of a Kubernetes cluster is to allow all Pods to freely talk to each other. Whilst this sounds insecure, originally Kubernetes was designed to orchestrate services that communicated with each other, it was only later that network policies were added. A network policy is applied to a Kubernetes namespace and controls ingress into and egress out of Pods in that namespace. The ports specified in a NetworkPolicy are the ports exposed by the Pods , they are not any ports that may be exposed by any Service that exposes the Pod ports. For example, if a Pod exposed port 8080 and a Service exposing the Pod mapped port 80 in the Service to port 8080 in the Pod , the NetworkPolicy ingress rule would be for the Pod port 8080. 
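To make this concrete, here is a minimal sketch (the names and port numbers below are illustrative and not part of this example) of a Service mapping port 80 to a Pod port 8080, together with the ingress rule that must reference the Pod port 8080 rather than the Service port 80:

apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web
  ports:
    # Clients connect to the Service on port 80 ...
    - port: 80
      # ... but traffic is delivered to the Pod on port 8080
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-web-ingress
spec:
  podSelector:
    matchLabels:
      app: web
  policyTypes:
    - Ingress
  ingress:
    - ports:
        # The NetworkPolicy names the Pod port, not the Service port
        - port: 8080
          protocol: TCP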
Network policies would typically be dictated by corporate security standards, where different companies may apply stricter or looser rules than others. The examples in this document start from the premise that everything will be blocked by a \"deny all\" policy and then opened up as needed. This is the most secure use of network policies, and the examples can easily be tweaked if looser rules are required. This example has the following sections: Deny All Policy - denying all ingress and egress Allow DNS - almost every use case will require egress to DNS Coherence Operator Policies - the network policies required to run the Coherence Operator Kubernetes API Server - allow the Operator egress to the Kubernetes API server Coherence Clusters Pods - allow the Operator egress to the Coherence cluster Pods Web Hooks - allow ingress to the Operator’s web hook port Coherence Cluster Policies - the network policies required to run Coherence clusters Inter-Cluster Access - allow Coherence cluster Pods to communicate Coherence Operator - allow Coherence cluster Pods to communicate with the Operator Clients - allows access by Extend and gRPC clients Metrics - allow Coherence cluster member metrics to be scraped Testing Connectivity - using the Operator’s network connectivity test utility to test policies Deny All Policy Kubernetes does not have a “deny all” policy, but this can be achieved with a regular network policy that specifies policyTypes of both Ingress and Egress but omits any rule definitions. A wild-card podSelector: {} applies the policy to all Pods in the namespace. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: deny-all spec: podSelector: {} policyTypes: - Ingress - Egress ingress: [] egress: [] The policy above can be installed into the coherence namespace with the following command: kubectl -n coherence apply -f manifests/deny-all.yaml After installing the deny-all policy, any Pod in the coherence namespace will not be allowed either ingress or egress. Very secure, but probably impractical for almost all use cases. After applying the deny-all policy more policies can be added to gradually open up the required access to run the Coherence Operator and Coherence clusters. Allow DNS When enforcing egress, such as with the deny-all policy above, it is important to remember that virtually every Pod needs to communicate with other Pods or Services, and will therefore need to access DNS. The policy below allows all Pods (using podSelector: {} ) egress to port 53 in all namespaces; the UDP rule is enabled and the equivalent TCP rule is shown commented out (see the tip below about also allowing TCP). apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-dns spec: podSelector: { } policyTypes: - Egress egress: - to: - namespaceSelector: { } ports: - protocol: UDP port: 53 # - protocol: TCP # port: 53 If allowing DNS egress to all namespaces is overly permissive, DNS could be further restricted to just the kube-system namespace, therefore restricting DNS lookups to only Kubernetes internal DNS. Kubernetes applies the kubernetes.io/metadata.name label to namespaces, and sets its value to the namespace name, so this can be used in label matchers. With the policy below, Pods will be able to use internal Kubernetes DNS only.
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-dns spec: podSelector: { } policyTypes: - Egress egress: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: kube-system ports: - protocol: UDP port: 53 # - protocol: TCP # port: 53 The policy above can be installed into the coherence namespace with the following command: kubectl -n coherence apply -f manifests/allow-dns-kube-system.yaml Tip Some documentation regarding allowing DNS with Kubernetes network policies only shows opening up UDP connections. During our testing with network policies, we discovered that with only UDP allowed, any lookup for a fully qualified name would fail. For example, nslookup my-service.my-namespace.svc would work, but the fully qualified nslookup my-service.my-namespace.svc.cluster.local would not. Adding TCP to the DNS policy allowed DNS lookups with .cluster.local to also work. Neither the Coherence Operator nor Coherence itself uses a fully qualified service name for a DNS lookup. It appears that Java’s InetAddress.getAllByName() method still works with only UDP allowed, albeit extremely slowly. By default, the service name used for the Coherence WKA setting uses just the .svc suffix. Coherence Operator Policies Assuming the coherence namespace exists, and the deny-all and allow-dns policies described above have been applied, if the Coherence Operator is installed, it will fail to start as it has no access to the endpoints it needs to operate. The following sections will add network policies to allow the Coherence Operator to access the Kubernetes services and Pods it requires. Access the Kubernetes API Server The Coherence Operator uses Kubernetes APIs to manage various resources in the Kubernetes cluster. For this to work, the Operator Pod must be allowed egress to the Kubernetes API server. Configuring access to the API server is not as straightforward as other network policies. The reason for this is that there is no Pod available with labels that can be used in the configuration; instead, the IP address of the API server itself must be used. There are various methods to find the IP address of the API server. The exact method required may vary depending on the type of Kubernetes cluster being used, for example a simple development cluster running in KinD on a laptop may differ from a cluster running in a cloud provider’s infrastructure. The common way to find the API server’s IP address is to use kubectl cluster-info as follows: $ kubectl cluster-info Kubernetes master is running at https://192.168.99.100:8443 In the above case the IP address of the API server would be 192.168.99.100 and the port is 8443 . In a simple KinD development cluster, the API server IP address can be obtained using kubectl as shown below: $ kubectl -n default get endpoints kubernetes -o json { \"apiVersion\": \"v1\", \"kind\": \"Endpoints\", \"metadata\": { \"creationTimestamp\": \"2023-02-08T10:31:26Z\", \"labels\": { \"endpointslice.kubernetes.io/skip-mirror\": \"true\" }, \"name\": \"kubernetes\", \"namespace\": \"default\", \"resourceVersion\": \"196\", \"uid\": \"68b0a7de-c0db-4524-a1a2-9d29eb137f28\" }, \"subsets\": [ { \"addresses\": [ { \"ip\": \"192.168.49.2\" } ], \"ports\": [ { \"name\": \"https\", \"port\": 8443, \"protocol\": \"TCP\" } ] } ] } In the above case the IP address of the API server would be 192.168.49.2 and the port is 8443 . The IP address displayed for the API server can then be used in the network policy.
The policy shown below allows Pods with the app.kubernetes.io/name: coherence-operator label (which the Operator has) egress access to the API server. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: operator-to-apiserver-egress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Egress - Ingress egress: - to: - ipBlock: cidr: 172.18.0.2/24 - ipBlock: cidr: 10.96.0.1/24 ports: - port: 6443 protocol: TCP - port: 443 protocol: TCP The allow-k8s-api-server.yaml policy can be installed into the coherence namespace to allow the Operator to communicate with the API server. kubectl -n coherence apply -f manifests/allow-k8s-api-server.yaml With the allow-k8s-api-server.yaml policy applied, the Coherence Operator should now start correctly and its Pods should reach the \"ready\" state. Ingress From and Egress Into Coherence Cluster Member Pods When a Coherence cluster is deployed, on start-up of a Pod the cluster member will connect to the Operator’s REST endpoint to query the site name and rack name, based on the Node the Coherence member is running on. To allow this to happen the Operator needs to be configured with the relevant ingress policy. The coherence-operator-rest-ingress policy applies to the Operator Pod, as it has a podSelector label of app.kubernetes.io/name: coherence-operator , which is a label applied to the Operator Pod. The policy allows any Pod with the label coherenceComponent: coherencePod ingress into the operator REST port. When the Operator creates a Coherence cluster, it applies the label coherenceComponent: coherencePod to all the Coherence cluster Pods. The policy below allows access from all namespaces using namespaceSelector: { } but it could be tightened up to specific namespaces if required. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-rest-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: operator protocol: TCP During operations such as scaling and shutting down of a Coherence cluster, the Operator needs to connect to the health endpoint of the Coherence cluster Pods. The coherence-operator-cluster-member-egress policy below applies to the Operator Pod, as it has a podSelector label of app.kubernetes.io/name: coherence-operator , which is a label applied to the Operator Pod. The policy allows egress to the health port in any Pod with the label coherenceComponent: coherencePod . When the Operator creates a Coherence cluster, it applies the label coherenceComponent: coherencePod to all the Coherence cluster Pods. The policy below allows egress to Coherence Pods in all namespaces using namespaceSelector: { } but it could be tightened up to specific namespaces if required. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-cluster-member-egress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: health protocol: TCP The two policies can be applied to the coherence namespace. 
kubectl -n coherence apply -f manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f manifests/allow-operator-cluster-member-egress.yaml Webhook Ingress With all the above policies in place, the Operator is able to work correctly, but if a Coherence resource is now created, Kubernetes will be unable to call the Operator’s webhook without the correct ingress policy. The following example demonstrates this. Assume there is a minimal Coherence yaml file named minimal.yaml that will create a single member Coherence cluster. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: replicas: 1 If minimal.yaml is applied using kubectl with a small timeout of 10 seconds, the creation of the resource will fail due to Kubernetes not having access to the Coherence Operator webhook. $ kubectl apply --timeout=10s -f minimal.yaml Error from server (InternalError): error when creating \"minimal.yaml\": Internal error occurred: failed calling webhook \"coherence.oracle.com\": failed to call webhook: Post \"https://coherence-operator-webhook.operator-test.svc:443/mutate-coherence-oracle-com-v1-coherence?timeout=10s\": context deadline exceeded The simplest solution is to allow ingress from any IP address to the webhook port, with a policy like that shown below. This policy uses an empty from: [] attribute, which allows access from anywhere to the webhook-server port in the Pod. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: apiserver-to-operator-webhook-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: [] ports: - port: webhook-server protocol: TCP Allowing access to the webhook from anywhere is not very secure, so a more restrictive from attribute could be used to limit access to the IP address (or addresses) of the Kubernetes API server. As with the API server policy above, the trick here is knowing the API server addresses to use. The policy below only allows access from specific addresses: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: apiserver-to-operator-webhook-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: - ipBlock: cidr: 172.18.0.2/24 - ipBlock: cidr: 10.96.0.1/24 ports: - port: webhook-server protocol: TCP - port: 443 protocol: TCP Coherence Cluster Member Policies Once the policies are in place to allow the Coherence Operator to work, the policies to allow Coherence clusters to run can be put in place. The exact set of policies required will vary depending on the Coherence functionality being used. If Coherence is embedded in another application, such as a web-server, then additional policies may also be needed to allow ingress to other endpoints. Conversely, if the Coherence application needs access to other services, for example a database, then additional egress policies may need to be created. This example is only going to cover Coherence use cases, but it should be simple enough to apply the same techniques to policies for other applications. Access Other Cluster Members All Pods in a Coherence cluster must be able to talk to each other (otherwise they wouldn’t be a cluster). This means that there needs to be ingress and egress policies to allow this. Cluster port : The default cluster port is 7574, and there is almost never any need to change this, especially in a containerised environment where there is little chance of port conflicts.
Unicast ports : Unicast uses TMB (default) and UDP. Each cluster member listens on one UDP and one TCP port and both ports need to be opened in the network policy. The default behaviour of Coherence is for the unicast ports to be automatically assigned from the operating system’s available ephemeral port range. When securing Coherence with network policies, the use of ephemeral ports will not work, so a range of ports can be specified for coherence to operate within. The Coherence Operator sets values for both unicast ports so that ephemeral ports will not be used. The default values are 7575 and 7576 . The two unicast ports can be changed in the Coherence spec by setting the spec.coherence.localPort field, and the spec.coherence.localPortAdjust field for example: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: coherence: localPort: 9000 localPortAdjust: 9001 Alternatively the values can also be configured using environment variables env: - name: COHERENCE_LOCALPORT value: \"9000\" - name: COHERENCE_LOCALPORT_ADJUST value: \"9001\" Echo port 7 : The default TCP port of the IpMonitor component that is used for detecting hardware failure of cluster members. Coherence doesn’t bind to this port, it only tries to connect to it as a means of pinging remote machines, or in this case Pods. The Coherence Operator applies the coherenceComponent: coherencePod label to all Coherence Pods, so this can be used in the network policy podSelector , to apply the policy to only the Coherence Pods. The policy below works with the default ports configured by the Operator. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-coherence-cluster spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 endPort: 7576 protocol: TCP - port: 7574 endPort: 7576 protocol: UDP - port: 7 protocol: TCP egress: - to: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 endPort: 7576 protocol: TCP - port: 7574 endPort: 7576 protocol: UDP - port: 7 protocol: TCP If the Coherence local port and local port adjust values are changed, then the policy would need to be amended. For example, if COHERENCE_LOCALPORT=9000 and COHERENCE_LOCALPORT_ADJUST=9100 apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-coherence-cluster spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 protocol: TCP - port: 7574 protocol: UDP - port: 9000 endPort: 9100 protocol: TCP - port: 9000 endPort: 9100 protocol: UDP - port: 7 protocol: TCP egress: - to: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 protocol: TCP - port: 7574 protocol: UDP - port: 9000 endPort: 9100 protocol: TCP - port: 9000 endPort: 9100 protocol: UDP - port: 7 protocol: TCP Both of the policies above should be applied to the namespace where the Coherence cluster will be deployed. With the two policies above in place, the Coherence Pods will be able to communicate. Egress to and Ingress From the Coherence Operator When a Coherence Pod starts Coherence calls back to the Operator to obtain the site name and rack name based on the Node the Pod is scheduled onto. For this to work, there needs to be an egress policy to allow Coherence Pods to access the Operator. 
During certain operations the Operator needs to call the Coherence members health endpoint to check health and status. For this to work there needs to be an ingress policy to allow the Operator access to the health endpoint in the Coherence Pods The policy below applies to Pods with the coherenceComponent: coherencePod label, which will match Coherence cluster member Pods. The policy allows ingress from the Operator to the Coherence Pod health port from namespace coherence using the namespace selector label kubernetes.io/metadata.name: coherence and Pod selector label app.kubernetes.io/name: coherence-operator The policy allows egress from the Coherence pods to the Operator’s REST server operator port. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-cluster-member-access spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: coherence podSelector: matchLabels: app.kubernetes.io/name: coherence-operator ports: - port: health protocol: TCP egress: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: coherence podSelector: matchLabels: app.kubernetes.io/name: coherence-operator ports: - port: operator protocol: TCP If the Operator is not running in the coherence namespace then the namespace match label can be changed to the required value. The policy above should be applied to the namespace where the Coherence cluster will be deployed. Client Access (Coherence*Extend and gRPC) A typical Coherence cluster does not run in isolation but as part of a larger application. If the application has other Pods that are Coherence clients, then they will need access to the Coherence cluster. This would usually mean creating ingress and egress policies for the Coherence Extend port and gRPC port, depending on which Coherence APIs are being used. Instead of using actual port numbers, a NetworkPolicy can be made more flexible by using port names. When ports are defined in a container spec of a Pod, they are usually named. By using the names of the ports in the NetworkPolicy instead of port numbers, the real port numbers can be changed without affecting the network policy. Coherence Extend Access If Coherence Extend is being used, then first the Extend Proxy must be configured to use a fixed port. The default behaviour of Coherence is to bind the Extend proxy to an ephemeral port and clients use the Coherence NameService to look up the port to use. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 the Extend proxy is already configured to run on a fixed port 20000 . When using this image, or any image that uses the default Coherence cache configuration file, this port can be changed by setting the COHERENCE_EXTEND_PORT environment variable. When using the Coherence Concurrent extensions over Extend, the Concurrent Extend proxy also needs to be configured with a fixed port. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 the Concurrent Extend proxy is already configured to run on a fixed port 20001 . When using this image, or any image that uses the default Coherence cache configuration file, this port can be changed by setting the COHERENCE_CONCURRENT_EXTEND_PORT environment variable. For the examples below, a Coherence deployment has the following configuration. 
This will expose Extend on a port named extend with a port number of 20000 , and a port named extend-atomics with a port number of 20001 . The policies described below will then use the port names, so if required the port number could be changed and the policies would still work. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: ports: - name: extend port: 20000 - name: extend-atomics port: 20001 The ingress policy below will work with the default Coherence image and allow ingress into the Coherence Pods to both the default Extend port and Coherence Concurrent Extend port. The policy allows ingress from Pods that have the coherence.oracle.com/extendClient: true label, from any namespace. It could be tightened further by using a more specific namespace selector. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-extend-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: {} podSelector: matchLabels: coherence.oracle.com/extendClient: \"true\" ports: - port: extend protocol: TCP - port: extend-atomics protocol: TCP The policy above should be applied to the namespace where the Coherence cluster is running. The egress policy below will work with the default Coherence image and allow egress from Pods with the coherence.oracle.com/extendClient: true label to Coherence Pods with the label coherenceComponent: coherencePod , on both the default Extend port and the Coherence Concurrent Extend port. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-extend-egress spec: podSelector: matchLabels: coherence.oracle.com/extendClient: \"true\" policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: extend protocol: TCP - port: extend-atomics protocol: TCP The policy above allows egress to Coherence Pods in any namespace. This would ideally be tightened up to the specific namespace that the Coherence cluster is deployed in. For example, if the Coherence cluster is deployed in the datastore namespace, then the to section of the policy could be changed as follows: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: datastore podSelector: matchLabels: coherenceComponent: coherencePod This policy must be applied to the namespace where the client Pods will be deployed . Coherence gRPC Access If Coherence gRPC is being used, then first the gRPC Proxy must be configured to use a fixed port. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 the gRPC proxy is already configured to run on a fixed port 1408 . The gRPC proxy port can be changed by setting the COHERENCE_GRPC_PORT environment variable. The ingress policy below will allow ingress into the Coherence Pods' gRPC port. The policy allows ingress from Pods that have the coherence.oracle.com/grpcClient: true label, from any namespace. It could be tightened further by using a more specific namespace selector. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-grpc-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: {} podSelector: matchLabels: coherence.oracle.com/grpcClient: \"true\" ports: - port: grpc protocol: TCP The policy above should be applied to the namespace where the Coherence cluster is running.
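For these client ingress and egress policies to take effect, the client Pods themselves must carry the matching labels. Purely as a sketch (the Deployment name and image are hypothetical and not part of this example), a client workload might be labelled as follows; a real client would normally only need the label for the API it actually uses (Extend or gRPC).

apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-client
  template:
    metadata:
      labels:
        app: my-client
        # Labels used by the client ingress and egress policies in this example
        coherence.oracle.com/extendClient: "true"
        coherence.oracle.com/grpcClient: "true"
    spec:
      containers:
        - name: client
          image: my-registry/my-client:1.0.0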
The egress policy below will allow egress to the gRPC port from Pods with the coherence.oracle.com/grpcClient: true label to Coherence Pods with the label coherenceComponent: coherencePod . apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-grpc-egress spec: podSelector: matchLabels: coherence.oracle.com/grpcClient: \"true\" policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: grpc protocol: TCP The policy above allows egress to Coherence Pods in any namespace. This would ideally be tightened up to the specific namespace that the Coherence cluster is deployed in. For example, if the Coherence cluster is deployed in the datastore namespace, then the to section of the policy could be changed as follows: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: datastore podSelector: matchLabels: coherenceComponent: coherencePod This policy must be applied to the namespace where the client Pods will be deployed . Coherence Metrics If Coherence metrics is enabled, there will need to be an ingress policy to allow connections from metrics clients. There would also need to be a similar egress policy in the metrics client’s namespace to allow it to access the Coherence metrics endpoints. A simple Coherence resource that will create a cluster with metrics enabled is shown below. This yaml will create a Coherence cluster with a port named metrics that maps to the default metrics port of 9612 . apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: coherence: metrics: enabled: true ports: - name: metrics serviceMonitor: enabled: true The example below will assume that metrics will be scraped by Prometheus, and that Prometheus is installed into a namespace called monitoring . An ingress policy must be created in the namespace where the Coherence cluster is deployed, allowing ingress to the metrics port from the Prometheus Pods. The Pods running Prometheus have a label app.kubernetes.io/name: prometheus so this can be used in the policy’s Pod selector. This policy should be applied to the namespace where the Coherence cluster is running. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-metrics-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: monitoring podSelector: matchLabels: app.kubernetes.io/name: prometheus ports: - port: metrics protocol: TCP If the monitoring namespace also has a \"deny-all\" policy and needs egress opening up for Prometheus to scrape metrics, then an egress policy will need to be added to the monitoring namespace. The policy below will allow Pods with the label app.kubernetes.io/name: prometheus egress to Pods with the coherenceComponent: coherencePod label in any namespace. The policy could be further tightened up by adding a namespace selector to restrict egress to the specific namespace where the Coherence cluster is running.
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-metrics-egress spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: metrics protocol: TCP Testing Network Policies At the time of writing this documentation, Kubernetes provides no way to verify the correctness of network policies. It is easy to mess up a policy; it will then either block too much traffic, in which case your application will not work, or, worse, it will fail to block access that it should, leaving a security hole. As we have had various requests for help from customers who cannot get Coherence to work with network policies enabled, the Operator has a simple utility to test connectivity outside of Coherence. This will allow testing of policies without the complications of having to start a Coherence server. This example includes some simple yaml files that will create simulator Pods that listen on all the ports used by the Operator and by a Coherence cluster member. These simulator Pods are configured with the same labels that the real Operator and Coherence Pods would have, and the same labels used by the network policies in this example. Also included are some yaml files that start a test client that simulates either the Operator connecting to Coherence Pods or a Coherence Pod connecting to the Operator and to other Coherence Pods. To run these tests, the Operator does not have to be installed. Create the Test Namespaces In this example we will assume the Operator will eventually be running in a namespace called coherence and the Coherence cluster will run in a namespace called coh-test . We can create the namespaces using kubectl : kubectl create ns coherence kubectl create ns coh-test At this point there are no network policies installed; this will allow us to confirm that the connectivity tests work. Start the Operator Simulator The Operator simulator server should run in the coherence namespace. It can be created using the following command: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator-server.yaml Start the Coherence Cluster Simulator The Coherence cluster member simulator server should run in the coh-test namespace. It can be created using the following command: kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence-server.yaml Run the Operator Test We can now run the Operator test Job. This will run a Kubernetes Job that simulates the Operator connecting to the Kubernetes API server and to the Coherence cluster Pods. kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml The test Job should complete very quickly as it is only testing connectivity to various ports. The results of the test can be seen by looking at the Pod log. 
The command below will display the log: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) The output from a successful test will look like this: 1.6727606592497227e+09 INFO runner Operator Version: 3.3.2 1.6727606592497835e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727606592500978e+09 INFO runner Operator Built By: jonathanknight 1.6727606592501197e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727606592501485e+09 INFO runner Go Version: go1.19.2 1.6727606592501757e+09 INFO runner Go OS/Arch: linux/amd64 1.6727606592504115e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727606592504556e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727606592664087e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727606592674055e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727606592770455e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} We can see that the test has connected to the Kubernetes API server and has connected to the health port on the Coherence cluster test server in the coh-test namespace. The test Job can then be deleted: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml Run the Cluster Member Test The cluster member test simulates a Coherence cluster member connecting to other cluster members in the same namespace and also making calls to the Operator’s REST endpoint. kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence.yaml Again, the test should complete quickly as it is just connecting to various ports. The results of the test can be seen by looking at the Pod log. 
The command below will display the log: kubectl -n coh-test logs $(kubectl -n coh-test get pod -l 'coherenceNetTest=coherence-client' -o name) The output from a successful test will look like this: 1.6727631152848177e+09 INFO runner Operator Version: 3.3.2 1.6727631152849226e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727631152849536e+09 INFO runner Operator Built By: jonathanknight 1.6727631152849755e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727631152849965e+09 INFO runner Go Version: go1.19.2 1.6727631152850187e+09 INFO runner Go OS/Arch: linux/amd64 1.6727631152852216e+09 INFO net-test Starting test {\"Name\": \"Cluster Member Simulator\"} 1.6727631152852666e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152997334e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152998908e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153059115e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153063197e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153116117e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153119817e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153187876e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153189638e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153265746e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153267298e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153340726e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153342876e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} 1.6727631153406997e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} The test client successfully connected to the Coherence cluster port (7475), the two unicast ports (7575 and 7576), the Coherence management port (30000), the Coherence metrics port (9612), the Operator REST port (8000), and the echo port (7). The test Job can then be deleted: kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence.yaml Testing the Operator Web Hook The Operator has a web-hook that k8s calls to validate Coherence resource configurations and to provide default values. 
Web hooks in Kubernetes use TLS by default and listen on port 443. The Operator server simulator also listens on port 443 to allow this connectivity to be tested. The network policy in this example that allows ingress to the web-hook allows any client to connect. This is because it is not always simple to work out the IP address that the API server will connect to the web-hook from. We can use the network tester to simulate this by running a Job that will connect to the web hook port. The web-hook test job in this example does not label the Pod and can be run from the default namespace to simulate a random external connection. kubectl -n default apply -f examples/095_network_policies/manifests/net-test-webhook.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n default logs $(kubectl -n default get pod -l 'coherenceNetTest=webhook-client' -o name) The output from a successful test will look like this: 1.6727639834559627e+09 INFO runner Operator Version: 3.3.2 1.6727639834562948e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727639834563956e+09 INFO runner Operator Built By: jonathanknight 1.6727639834565024e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727639834566057e+09 INFO runner Go Version: go1.19.2 1.6727639834567096e+09 INFO runner Go OS/Arch: linux/amd64 1.6727639834570327e+09 INFO net-test Starting test {\"Name\": \"Web-Hook Client\"} 1.6727639834571698e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} 1.6727639834791095e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} We can see that the client successfully connected to port 443. The test Job can then be deleted: kubectl -n default delete -f examples/095_network_policies/manifests/net-test-webhook.yaml Testing Ad-Hoc Ports The test client is able to test connectivity to any host and port. For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.0 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. 
kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml Test with Network Policies All the above tests ran successfully without any network policies. We can now start to apply policies and re-run the tests to see what happens. In a secure environment we would start with a policy that blocks all access and then gradually open up required ports. We can apply the deny-all.yaml policy and then re-run the tests. 
We should apply the policy to both of the namespaces we are using in this example: kubectl -n coherence apply -f examples/095_network_policies/manifests/deny-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/deny-all.yaml Now, re-run the Operator test client: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) 1.6727671834237397e+09 INFO runner Operator Version: 3.3.2 1.6727671834238796e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727671834239576e+09 INFO runner Operator Built By: jonathanknight 1.6727671834240365e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727671834240875e+09 INFO runner Go Version: go1.19.2 1.6727671834241736e+09 INFO runner Go OS/Arch: linux/amd64 1.6727671834244306e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727671834245417e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727672134268515e+09 INFO net-test Testing connectivity FAILED {\"PortName\": \"K8s API Server\", \"Error\": \"Get \\\"https://10.96.0.1:443/version?timeout=32s\\\": dial tcp 10.96.0.1:443: i/o timeout\"} 1.6727672134269848e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727672234281697e+09 INFO net-test Testing connectivity FAILED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676, \"Error\": \"dial tcp: lookup net-test-coherence-server.coh-test.svc: i/o timeout\"} We can see that the test client failed to connect to the Kubernetes API server and failed to connect to the Coherence cluster health port. This means the deny-all policy is working. We can now apply the various polices to fix the test kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-k8s-api-server.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-cluster-member-egress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-webhook-ingress-from-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-operator-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-metrics-ingress.yaml Now, delete and re-run the Operator test client: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) Now with the policies applied the test should have passed. 
1.6727691273634596e+09 INFO runner Operator Version: 3.3.2 1.6727691273635025e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727691273635256e+09 INFO runner Operator Built By: jonathanknight 1.6727691273635616e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727691273637156e+09 INFO runner Go Version: go1.19.2 1.6727691273637407e+09 INFO runner Go OS/Arch: linux/amd64 1.6727691273639407e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727691273639877e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727691273857167e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727691273858056e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727691273933685e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} The other tests can also be re-run and should also pass. Clean-Up Once the tests are completed, the test servers and Jobs can be deleted. kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator-server.yaml kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence-server.yaml ", + "text": " This example covers running the Coherence Operator and Coherence clusters in Kubernetes with network policies. In Kubernetes, a Network Policy is an application-centric construct which allows you to specify how a pod is allowed to communicate with various network \"entities\" (we use the word \"entity\" here to avoid overloading the more common terms such as \"endpoints\" and \"services\", which have specific Kubernetes connotations) over the network. Note Network policies in Kubernetes are easy to get wrong if you are not careful. In that case a policy will either block traffic it should not, in which case your application will not work, or it will let through traffic it should block, which will be an invisible security hole. It is obviously important to test your policies, but Kubernetes offers next to zero visibility into what the policies are actually doing, as it is typically the network CNI extensions that provide the policy implementation, and each of these may work in a different way. Introduction Kubernetes network policies specify the access permissions for groups of pods, similar to the way security groups in the cloud are used to control access to VM instances, or the way firewalls are used. The default behaviour of a Kubernetes cluster is to allow all Pods to freely talk to each other. Whilst this sounds insecure, Kubernetes was originally designed to orchestrate services that communicate with each other; it was only later that network policies were added. A network policy is applied to a Kubernetes namespace and controls ingress into and egress out of Pods in that namespace. The ports specified in a NetworkPolicy are the ports exposed by the Pods ; they are not the ports that may be exposed by any Service that exposes the Pod ports. For example, if a Pod exposed port 8080 and a Service exposing the Pod mapped port 80 in the Service to port 8080 in the Pod , the NetworkPolicy ingress rule would be for the Pod port 8080. Network policies would typically end up being dictated by corporate security standards, where different companies may apply stricter or looser rules than others. 
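As a purely illustrative sketch of the Pod-port rule above (the app label, the my-app-service name and the 8080/80 ports are hypothetical and not part of this example), a Service may map port 80 to a Pod's port 8080, but the NetworkPolicy must still reference the Pod port 8080:
apiVersion: v1
kind: Service
metadata:
  name: my-app-service
spec:
  selector:
    app: my-app
  ports:
    - port: 80          # port exposed by the Service
      targetPort: 8080  # port exposed by the Pod
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-app-ingress
spec:
  podSelector:
    matchLabels:
      app: my-app
  policyTypes:
    - Ingress
  ingress:
    - ports:
        - port: 8080    # the Pod port, not the Service port 80
          protocol: TCP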
The examples in this document start from the premise that everything will be blocked by a \"deny all\" policy and then opened up as needed. This is the most secure use of network policies, and the examples can easily be tweaked if looser rules are applied. This example has the following sections: Deny All Policy - denying all ingress and egress Allow DNS - almost every use case will require egress to DNS Coherence Operator Policies - the network policies required to run the Coherence Operator Kubernetes API Server - allow the Operator egress to the Kubernetes API server Coherence Clusters Pods - allow the Operator egress to the Coherence cluster Pods Web Hooks - allow ingress to the Operator’s web hook port Coherence Cluster Policies - the network policies required to run Coherence clusters Inter-Cluster Access - allow Coherence cluster Pods to communicate Coherence Operator - allow Coherence cluster Pods to communicate with the Operator Clients - allows access by Extend and gRPC clients Metrics - allow Coherence cluster member metrics to be scraped Testing Connectivity - using the Operator’s network connectivity test utility to test policies Deny All Policy Kubernetes does not have a “deny all” policy, but this can be achieved with a regular network policy that specifies a policyTypes of both Ingress and Egress , but omits any rule definitions. A wild-card podSelector: {} applies the policy to all Pods in the namespace. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: deny-all spec: podSelector: {} policyTypes: - Ingress - Egress ingress: [] egress: [] The policy above can be installed into the coherence namespace with the following command: kubectl -n coherence apply -f manifests/deny-all.yaml After installing the deny-all policy, any Pod in the coherence namespace will not be allowed either ingress or egress. Very secure, but probably impractical for almost all use cases. After applying the deny-all policy, more policies can be added to gradually open up the required access to run the Coherence Operator and Coherence clusters. Allow DNS When enforcing egress, such as with the deny-all policy above, it is important to remember that virtually every Pod needs to communicate with other Pods or Services, and will therefore need to access DNS. The policy below allows all Pods (using podSelector: {} ) egress to port 53 in all namespaces; UDP is enabled, and the TCP rule is left commented out but can be uncommented as explained in the tip below. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-dns spec: podSelector: { } policyTypes: - Egress egress: - to: - namespaceSelector: { } ports: - protocol: UDP port: 53 # - protocol: TCP # port: 53 If allowing DNS egress to all namespaces is overly permissive, DNS could be further restricted to just the kube-system namespace, thereby restricting DNS lookups to only Kubernetes internal DNS. Kubernetes applies the kubernetes.io/metadata.name label to namespaces, and sets its value to the namespace name, so this can be used in label matchers. With the policy below, Pods will be able to use internal Kubernetes DNS only. 
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-dns spec: podSelector: { } policyTypes: - Egress egress: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: kube-system ports: - protocol: UDP port: 53 # - protocol: TCP # port: 53 The policy above can be installed into the coherence namespace with the following command: kubectl -n coherence apply -f manifests/allow-dns-kube-system.yaml Tip Some documentation regarding allowing DNS with Kubernetes network policies only shows opening up UDP connections. During our testing with network policies, we discovered that with only UDP allowed, any lookup for a fully qualified name would fail. For example, nslookup my-service.my-namespace.svc would work, but the fully qualified nslookup my-service.my-namespace.svc.cluster.local would not. Adding TCP to the DNS policy allowed DNS lookups with .cluster.local to also work. Neither the Coherence Operator nor Coherence itself uses a fully qualified service name for a DNS lookup. It appears that Java’s InetAddress.getAllByName() method still works with only UDP allowed, albeit extremely slowly. By default, the service name used for the Coherence WKA setting uses just the .svc suffix. Coherence Operator Policies Assuming the coherence namespace exists, and the deny-all and allow-dns policies described above have been applied, if the Coherence Operator is installed, it will fail to start as it has no access to the endpoints it needs to operate. The following sections will add network policies to allow the Coherence Operator to access the Kubernetes services and Pods it requires. Access the Kubernetes API Server The Coherence Operator uses Kubernetes APIs to manage various resources in the Kubernetes cluster. For this to work, the Operator Pod must be allowed egress to the Kubernetes API server. Configuring access to the API server is not as straightforward as other network policies. The reason for this is that there is no Pod available with labels that can be used in the configuration; instead, the IP address of the API server itself must be used. There are various methods to find the IP address of the API server. The exact method required may vary depending on the type of Kubernetes cluster being used, for example a simple development cluster running in KinD on a laptop may differ from a cluster running in a cloud provider’s infrastructure. The common way to find the API server’s IP address is to use kubectl cluster-info as follows: $ kubectl cluster-info Kubernetes master is running at https://192.168.99.100:8443 In the above case the IP address of the API server would be 192.168.99.100 and the port is 8443 . In a simple KinD development cluster, the API server IP address can be obtained using kubectl as shown below: $ kubectl -n default get endpoints kubernetes -o json { \"apiVersion\": \"v1\", \"kind\": \"Endpoints\", \"metadata\": { \"creationTimestamp\": \"2023-02-08T10:31:26Z\", \"labels\": { \"endpointslice.kubernetes.io/skip-mirror\": \"true\" }, \"name\": \"kubernetes\", \"namespace\": \"default\", \"resourceVersion\": \"196\", \"uid\": \"68b0a7de-c0db-4524-a1a2-9d29eb137f28\" }, \"subsets\": [ { \"addresses\": [ { \"ip\": \"192.168.49.2\" } ], \"ports\": [ { \"name\": \"https\", \"port\": 8443, \"protocol\": \"TCP\" } ] } ] } In the above case the IP address of the API server would be 192.168.49.2 and the port is 8443 . The IP address displayed for the API server can then be used in the network policy. 
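If the cluster exposes the default kubernetes Endpoints resource as in the KinD example above, the address and port can also be extracted in one step with a jsonpath query; this is only a convenience sketch and the exact output will differ between clusters:
# prints something like 192.168.49.2:8443
kubectl -n default get endpoints kubernetes \
  -o jsonpath='{.subsets[0].addresses[0].ip}:{.subsets[0].ports[0].port}'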
The policy shown below allows Pods with the app.kubernetes.io/name: coherence-operator label (which the Operator has) egress access to the API server. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: operator-to-apiserver-egress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Egress - Ingress egress: - to: - ipBlock: cidr: 172.18.0.2/24 - ipBlock: cidr: 10.96.0.1/24 ports: - port: 6443 protocol: TCP - port: 443 protocol: TCP The allow-k8s-api-server.yaml policy can be installed into the coherence namespace to allow the Operator to communicate with the API server. kubectl -n coherence apply -f manifests/allow-k8s-api-server.yaml With the allow-k8s-api-server.yaml policy applied, the Coherence Operator should now start correctly and its Pods should reach the \"ready\" state. Ingress From and Egress Into Coherence Cluster Member Pods When a Coherence cluster is deployed, on start-up of a Pod the cluster member will connect to the Operator’s REST endpoint to query the site name and rack name, based on the Node the Coherence member is running on. To allow this to happen the Operator needs to be configured with the relevant ingress policy. The coherence-operator-rest-ingress policy applies to the Operator Pod, as it has a podSelector label of app.kubernetes.io/name: coherence-operator , which is a label applied to the Operator Pod. The policy allows any Pod with the label coherenceComponent: coherencePod ingress into the operator REST port. When the Operator creates a Coherence cluster, it applies the label coherenceComponent: coherencePod to all the Coherence cluster Pods. The policy below allows access from all namespaces using namespaceSelector: { } but it could be tightened up to specific namespaces if required. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-rest-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: operator protocol: TCP During operations such as scaling and shutting down of a Coherence cluster, the Operator needs to connect to the health endpoint of the Coherence cluster Pods. The coherence-operator-cluster-member-egress policy below applies to the Operator Pod, as it has a podSelector label of app.kubernetes.io/name: coherence-operator , which is a label applied to the Operator Pod. The policy allows egress to the health port in any Pod with the label coherenceComponent: coherencePod . When the Operator creates a Coherence cluster, it applies the label coherenceComponent: coherencePod to all the Coherence cluster Pods. The policy below allows egress to Coherence Pods in all namespaces using namespaceSelector: { } but it could be tightened up to specific namespaces if required. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-cluster-member-egress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: health protocol: TCP The two policies can be applied to the coherence namespace. 
kubectl -n coherence apply -f manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f manifests/allow-operator-cluster-member-egress.yaml Webhook Ingress With all the above policies in place, the Operator is able to work correctly, but if a Coherence resource is now created, Kubernetes will be unable to call the Operator’s webhook without the correct ingress policy. The following example demonstrates this. Assume there is a minimal Coherence yaml file named minimal.yaml that will create a single member Coherence cluster. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: replicas: 1 If minimal.yaml is applied using kubectl with a small timeout of 10 seconds, the creation of the resource will fail due to Kubernetes not having access to the Coherence Operator webhook. $ kubectl apply --timeout=10s -f minimal.yaml Error from server (InternalError): error when creating \"minimal.yaml\": Internal error occurred: failed calling webhook \"coherence.oracle.com\": failed to call webhook: Post \"https://coherence-operator-webhook.operator-test.svc:443/mutate-coherence-oracle-com-v1-coherence?timeout=10s\": context deadline exceeded The simplest solution is to allow ingress from any IP address to the webhook port, with a policy like that shown below. This policy uses an empty from: [] attribute, which allows access from anywhere to the webhook-server port in the Pod. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: apiserver-to-operator-webhook-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: [] ports: - port: webhook-server protocol: TCP Allowing access to the webhook from anywhere is not very secure, so a more restrictive from attribute could be used to limit access to the IP address (or addresses) of the Kubernetes API server. As with the API server policy above, the trick here is knowing the API server addresses to use. The policy below only allows access from specific addresses: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: apiserver-to-operator-webhook-ingress spec: podSelector: matchLabels: app.kubernetes.io/name: coherence-operator policyTypes: - Ingress ingress: - from: - ipBlock: cidr: 172.18.0.2/24 - ipBlock: cidr: 10.96.0.1/24 ports: - port: webhook-server protocol: TCP - port: 443 protocol: TCP Coherence Cluster Member Policies Once the policies are in place to allow the Coherence Operator to work, the policies to allow Coherence clusters to run can be put in place. The exact set of policies required will vary depending on the Coherence functionality being used. If Coherence is embedded in another application, such as a web-server, then additional policies may also be needed to allow ingress to other endpoints. Conversely, if the Coherence application needs access to other services, for example a database, then additional egress policies may need to be created. This example is only going to cover Coherence use cases, but it should be simple enough to apply the same techniques to policies for other applications. Access Other Cluster Members All Pods in a Coherence cluster must be able to talk to each other (otherwise they wouldn’t be a cluster). This means that there need to be ingress and egress policies to allow this. Cluster port : The default cluster port is 7574, and there is almost never any need to change this, especially in a containerised environment where there is little chance of port conflicts. 
Unicast ports : Unicast uses TMB (default) and UDP. Each cluster member listens on one UDP and one TCP port and both ports need to be opened in the network policy. The default behaviour of Coherence is for the unicast ports to be automatically assigned from the operating system’s available ephemeral port range. When securing Coherence with network policies, the use of ephemeral ports will not work, so a range of ports can be specified for coherence to operate within. The Coherence Operator sets values for both unicast ports so that ephemeral ports will not be used. The default values are 7575 and 7576 . The two unicast ports can be changed in the Coherence spec by setting the spec.coherence.localPort field, and the spec.coherence.localPortAdjust field for example: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: coherence: localPort: 9000 localPortAdjust: 9001 Alternatively the values can also be configured using environment variables env: - name: COHERENCE_LOCALPORT value: \"9000\" - name: COHERENCE_LOCALPORT_ADJUST value: \"9001\" Echo port 7 : The default TCP port of the IpMonitor component that is used for detecting hardware failure of cluster members. Coherence doesn’t bind to this port, it only tries to connect to it as a means of pinging remote machines, or in this case Pods. The Coherence Operator applies the coherenceComponent: coherencePod label to all Coherence Pods, so this can be used in the network policy podSelector , to apply the policy to only the Coherence Pods. The policy below works with the default ports configured by the Operator. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-coherence-cluster spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 endPort: 7576 protocol: TCP - port: 7574 endPort: 7576 protocol: UDP - port: 7 protocol: TCP egress: - to: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 endPort: 7576 protocol: TCP - port: 7574 endPort: 7576 protocol: UDP - port: 7 protocol: TCP If the Coherence local port and local port adjust values are changed, then the policy would need to be amended. For example, if COHERENCE_LOCALPORT=9000 and COHERENCE_LOCALPORT_ADJUST=9100 apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: allow-coherence-cluster spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 protocol: TCP - port: 7574 protocol: UDP - port: 9000 endPort: 9100 protocol: TCP - port: 9000 endPort: 9100 protocol: UDP - port: 7 protocol: TCP egress: - to: - podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: 7574 protocol: TCP - port: 7574 protocol: UDP - port: 9000 endPort: 9100 protocol: TCP - port: 9000 endPort: 9100 protocol: UDP - port: 7 protocol: TCP Both of the policies above should be applied to the namespace where the Coherence cluster will be deployed. With the two policies above in place, the Coherence Pods will be able to communicate. Egress to and Ingress From the Coherence Operator When a Coherence Pod starts Coherence calls back to the Operator to obtain the site name and rack name based on the Node the Pod is scheduled onto. For this to work, there needs to be an egress policy to allow Coherence Pods to access the Operator. 
During certain operations the Operator needs to call the Coherence members health endpoint to check health and status. For this to work there needs to be an ingress policy to allow the Operator access to the health endpoint in the Coherence Pods The policy below applies to Pods with the coherenceComponent: coherencePod label, which will match Coherence cluster member Pods. The policy allows ingress from the Operator to the Coherence Pod health port from namespace coherence using the namespace selector label kubernetes.io/metadata.name: coherence and Pod selector label app.kubernetes.io/name: coherence-operator The policy allows egress from the Coherence pods to the Operator’s REST server operator port. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-operator-cluster-member-access spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress - Egress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: coherence podSelector: matchLabels: app.kubernetes.io/name: coherence-operator ports: - port: health protocol: TCP egress: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: coherence podSelector: matchLabels: app.kubernetes.io/name: coherence-operator ports: - port: operator protocol: TCP If the Operator is not running in the coherence namespace then the namespace match label can be changed to the required value. The policy above should be applied to the namespace where the Coherence cluster will be deployed. Client Access (Coherence*Extend and gRPC) A typical Coherence cluster does not run in isolation but as part of a larger application. If the application has other Pods that are Coherence clients, then they will need access to the Coherence cluster. This would usually mean creating ingress and egress policies for the Coherence Extend port and gRPC port, depending on which Coherence APIs are being used. Instead of using actual port numbers, a NetworkPolicy can be made more flexible by using port names. When ports are defined in a container spec of a Pod, they are usually named. By using the names of the ports in the NetworkPolicy instead of port numbers, the real port numbers can be changed without affecting the network policy. Coherence Extend Access If Coherence Extend is being used, then first the Extend Proxy must be configured to use a fixed port. The default behaviour of Coherence is to bind the Extend proxy to an ephemeral port and clients use the Coherence NameService to look up the port to use. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 the Extend proxy is already configured to run on a fixed port 20000 . When using this image, or any image that uses the default Coherence cache configuration file, this port can be changed by setting the COHERENCE_EXTEND_PORT environment variable. When using the Coherence Concurrent extensions over Extend, the Concurrent Extend proxy also needs to be configured with a fixed port. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 the Concurrent Extend proxy is already configured to run on a fixed port 20001 . When using this image, or any image that uses the default Coherence cache configuration file, this port can be changed by setting the COHERENCE_CONCURRENT_EXTEND_PORT environment variable. For the examples below, a Coherence deployment has the following configuration. 
This will expose Extend on a port named extend with a port number of 20000 , and a port named extend-atomics with a port number of 20001 . The policies described below will then use the port names, so if required the port numbers could be changed and the policies would still work. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: ports: - name: extend port: 20000 - name: extend-atomics port: 20001 The ingress policy below will work with the default Coherence image and allow ingress into the Coherence Pods to both the default Extend port and the Coherence Concurrent Extend port. The policy allows ingress from Pods that have the coherence.oracle.com/extendClient: true label, from any namespace. It could be tightened further by using a more specific namespace selector. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-extend-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: {} podSelector: matchLabels: coherence.oracle.com/extendClient: \"true\" ports: - port: extend protocol: TCP - port: extend-atomics protocol: TCP The policy above should be applied to the namespace where the Coherence cluster is running. The egress policy below will work with the default Coherence image and allow egress from Pods with the coherence.oracle.com/extendClient: true label to Coherence Pods with the label coherenceComponent: coherencePod , on both the default Extend port and the Coherence Concurrent Extend port. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-extend-egress spec: podSelector: matchLabels: coherence.oracle.com/extendClient: \"true\" policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: extend protocol: TCP - port: extend-atomics protocol: TCP The policy above allows egress to Coherence Pods in any namespace. This would ideally be tightened up to the specific namespace that the Coherence cluster is deployed in. For example, if the Coherence cluster is deployed in the datastore namespace, then the to section of the policy could be changed as follows: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: datastore podSelector: matchLabels: coherenceComponent: coherencePod This policy must be applied to the namespace where the client Pods will be deployed . Coherence gRPC Access If Coherence gRPC is being used, then first the gRPC Proxy must be configured to use a fixed port. When using the default Coherence images, for example ghcr.io/oracle/coherence-ce:22.06 , the gRPC proxy is already configured to run on a fixed port 1408 . The gRPC proxy port can be changed by setting the COHERENCE_GRPC_PORT environment variable. The ingress policy below will allow ingress into the Coherence Pods' gRPC port. The policy allows ingress from Pods that have the coherence.oracle.com/grpcClient: true label, from any namespace. It could be tightened further by using a more specific namespace selector. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-grpc-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: {} podSelector: matchLabels: coherence.oracle.com/grpcClient: \"true\" ports: - port: grpc protocol: TCP The policy above should be applied to the namespace where the Coherence cluster is running. 
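For the ingress rule above to match, the client Pods must carry the coherence.oracle.com/grpcClient: true label. As a hypothetical sketch (the my-grpc-client name and image below are placeholders, not part of this example), a client Deployment would set the label in its Pod template:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-grpc-client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-grpc-client
  template:
    metadata:
      labels:
        app: my-grpc-client
        # matched by the podSelector in the coherence-grpc-ingress policy above
        coherence.oracle.com/grpcClient: \"true\"
    spec:
      containers:
        - name: client
          image: my-grpc-client:1.0.0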
The egress policy below will allow egress to the gRPC port from Pods with the coherence.oracle.com/grpcClient: true label to Coherence Pods with the label coherenceComponent: coherencePod . apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-grpc-egress spec: podSelector: matchLabels: coherence.oracle.com/grpcClient: \"true\" policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: grpc protocol: TCP The policy above allows egress to Coherence Pods in any namespace. This would ideally be tightened up to the specific namespace that the Coherence cluster is deployed in. For example, if the Coherence cluster is deployed in the datastore namespace, then the to section of the policy could be changed as follows: - to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: datastore podSelector: matchLabels: coherenceComponent: coherencePod This policy must be applied to the namespace where the client Pods will be deployed . Coherence Metrics If Coherence metrics is enabled, there will need to be an ingress policy to allow connections from metrics clients. There would also need to be a similar egress policy in the metrics client’s namespace to allow it to access the Coherence metrics endpoints. A simple Coherence resource that will create a cluster with metrics enabled is shown below. This yaml will create a Coherence cluster with a port named metrics that maps to the default metrics port of 9612 . apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: coherence: metrics: enabled: true ports: - name: metrics serviceMonitor: enabled: true The example below will assume that metrics will be scraped by Prometheus, and that Prometheus is installed into a namespace called monitoring . An ingress policy must be created in the namespace where the Coherence cluster is deployed, allowing ingress to the metrics port from the Prometheus Pods. The Pods running Prometheus have a label app.kubernetes.io/name: prometheus , so this can be used in the policy’s Pod selector. This policy should be applied to the namespace where the Coherence cluster is running. apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-metrics-ingress spec: podSelector: matchLabels: coherenceComponent: coherencePod policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: monitoring podSelector: matchLabels: app.kubernetes.io/name: prometheus ports: - port: metrics protocol: TCP If the monitoring namespace also has a \"deny-all\" policy and needs egress opening up for Prometheus to scrape metrics, then an egress policy will need to be added to the monitoring namespace. The policy below will allow Pods with the label app.kubernetes.io/name: prometheus egress to Pods with the coherenceComponent: coherencePod label in any namespace. The policy could be further tightened up by adding a namespace selector to restrict egress to the specific namespace where the Coherence cluster is running. 
apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: coherence-metrics-egress spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus policyTypes: - Egress egress: - to: - namespaceSelector: { } podSelector: matchLabels: coherenceComponent: coherencePod ports: - port: metrics protocol: TCP Testing Network Policies At the time of writing this documentation, Kubernetes provides no way to verify the correctness of network policies. It is easy to mess up a policy; it will then either block too much traffic, in which case your application will not work, or, worse, it will fail to block access that it should, leaving a security hole. As we have had various requests for help from customers who cannot get Coherence to work with network policies enabled, the Operator has a simple utility to test connectivity outside of Coherence. This will allow testing of policies without the complications of having to start a Coherence server. This example includes some simple yaml files that will create simulator Pods that listen on all the ports used by the Operator and by a Coherence cluster member. These simulator Pods are configured with the same labels that the real Operator and Coherence Pods would have, and the same labels used by the network policies in this example. Also included are some yaml files that start a test client that simulates either the Operator connecting to Coherence Pods or a Coherence Pod connecting to the Operator and to other Coherence Pods. To run these tests, the Operator does not have to be installed. Create the Test Namespaces In this example we will assume the Operator will eventually be running in a namespace called coherence and the Coherence cluster will run in a namespace called coh-test . We can create the namespaces using kubectl : kubectl create ns coherence kubectl create ns coh-test At this point there are no network policies installed; this will allow us to confirm that the connectivity tests work. Start the Operator Simulator The Operator simulator server should run in the coherence namespace. It can be created using the following command: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator-server.yaml Start the Coherence Cluster Simulator The Coherence cluster member simulator server should run in the coh-test namespace. It can be created using the following command: kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence-server.yaml Run the Operator Test We can now run the Operator test Job. This will run a Kubernetes Job that simulates the Operator connecting to the Kubernetes API server and to the Coherence cluster Pods. kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml The test Job should complete very quickly as it is only testing connectivity to various ports. The results of the test can be seen by looking at the Pod log. 
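Depending on timing, the test Pod may still be running when first checked. The Pod created by the Job carries the coherenceNetTest=operator-client label used below, so its status can be confirmed first; a STATUS of Completed means the test has finished: kubectl -n coherence get pod -l 'coherenceNetTest=operator-client'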
The command below will display the log: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) The output from a successful test will look like this: 1.6727606592497227e+09 INFO runner Operator Version: 3.3.2 1.6727606592497835e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727606592500978e+09 INFO runner Operator Built By: jonathanknight 1.6727606592501197e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727606592501485e+09 INFO runner Go Version: go1.19.2 1.6727606592501757e+09 INFO runner Go OS/Arch: linux/amd64 1.6727606592504115e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727606592504556e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727606592664087e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727606592674055e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727606592770455e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} We can see that the test has connected to the Kubernetes API server and has connected to the health port on the Coherence cluster test server in the coh-test namespace. The test Job can then be deleted: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml Run the Cluster Member Test The cluster member test simulates a Coherence cluster member connecting to other cluster members in the same namespace and also making calls to the Operator’s REST endpoint. kubectl -n coh-test apply -f examples/095_network_policies/manifests/net-test-coherence.yaml Again, the test should complete quickly as it is just connecting to various ports. The results of the test can be seen by looking at the Pod log. 
The command below will display the log: kubectl -n coh-test logs $(kubectl -n coh-test get pod -l 'coherenceNetTest=coherence-client' -o name) The output from a successful test will look like this: 1.6727631152848177e+09 INFO runner Operator Version: 3.3.2 1.6727631152849226e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727631152849536e+09 INFO runner Operator Built By: jonathanknight 1.6727631152849755e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727631152849965e+09 INFO runner Go Version: go1.19.2 1.6727631152850187e+09 INFO runner Go OS/Arch: linux/amd64 1.6727631152852216e+09 INFO net-test Starting test {\"Name\": \"Cluster Member Simulator\"} 1.6727631152852666e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152997334e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort1\", \"Port\": 7575} 1.6727631152998908e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153059115e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"UnicastPort2\", \"Port\": 7576} 1.6727631153063197e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153116117e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Management\", \"Port\": 30000} 1.6727631153119817e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153187876e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Metrics\", \"Port\": 9612} 1.6727631153189638e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153265746e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"OperatorRest\", \"Port\": 8000} 1.6727631153267298e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153340726e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Echo\", \"Port\": 7} 1.6727631153342876e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} 1.6727631153406997e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"ClusterPort\", \"Port\": 7574} The test client successfully connected to the Coherence cluster port (7574), the two unicast ports (7575 and 7576), the Coherence management port (30000), the Coherence metrics port (9612), the Operator REST port (8000), and the echo port (7). The test Job can then be deleted: kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence.yaml Testing the Operator Web Hook The Operator has a web-hook that Kubernetes calls to validate Coherence resource configurations and to provide default values. 
Web hooks in Kubernetes use TLS by default and listen on port 443. The Operator server simulator also listens on port 443 to allow this connectivity to be tested. The network policy in this example that allows ingress to the web-hook allows any client to connect. This is because it is not always simple to work out the IP address that the API server will connect to the web-hook from. We can use the network tester to simulate this by running a Job that will connect to the web hook port. The web-hook test job in this example does not label the Pod and can be run from the default namespace to simulate a random external connection. kubectl -n default apply -f examples/095_network_policies/manifests/net-test-webhook.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n default logs $(kubectl -n default get pod -l 'coherenceNetTest=webhook-client' -o name) The output from a successful test will look like this: 1.6727639834559627e+09 INFO runner Operator Version: 3.3.2 1.6727639834562948e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727639834563956e+09 INFO runner Operator Built By: jonathanknight 1.6727639834565024e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727639834566057e+09 INFO runner Go Version: go1.19.2 1.6727639834567096e+09 INFO runner Go OS/Arch: linux/amd64 1.6727639834570327e+09 INFO net-test Starting test {\"Name\": \"Web-Hook Client\"} 1.6727639834571698e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} 1.6727639834791095e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-operator-server.coherence.svc\", \"PortName\": \"WebHook\", \"Port\": 443} We can see that the client successfully connected to port 443. The test Job can then be deleted: kubectl -n default delete -f examples/095_network_policies/manifests/net-test-webhook.yaml Testing Ad-Hoc Ports The test client is able to test connectivity to any host and port. For example suppose we want to simulate a Prometheus Pod connecting to the metrics port of a Coherence cluster. The server simulator is listening on port 9612, so we need to run the client to connect to that port. We can create a Job yaml file to run the test client. As the test will simulate a Prometheus client we add the labels that a standard Prometheus Pod would have and that we also use in the network policies in this example. In the Job yaml, we need to set the HOST , PORT and optionally the PROTOCOL environment variables. In this test, the host is the DNS name for the Service created for the Coherence server simulator net-test-coherence-server.coh-test.svc , the port is the metrics port 9612 and the protocol is tcp . apiVersion: batch/v1 kind: Job metadata: name: test-client labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: template: metadata: labels: app.kubernetes.io/name: prometheus coherenceNetTest: client spec: containers: - name: net-test image: ghcr.io/oracle/coherence-operator:3.4.1 env: - name: HOST value: net-test-coherence-server.coh-test.svc - name: PORT value: \"9612\" - name: PROTOCOL value: tcp command: - /files/runner args: - net-test - client restartPolicy: Never backoffLimit: 4 We need to run the test Job in the monitoring namespace, which is the same namespace that Prometheus is usually deployed into. 
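If the monitoring namespace does not already exist in the test cluster (for example, if Prometheus is not actually installed), it can be created first in the same way as the other test namespaces: kubectl create ns monitoring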
kubectl -n monitoring apply -f examples/095_network_policies/manifests/net-test-client.yaml We can then check the results of the Job by looking at the Pod log. kubectl -n monitoring logs $(kubectl -n monitoring get pod -l 'coherenceNetTest=client' -o name) The output from a successful test will look like this: 1.6727665901488597e+09 INFO runner Operator Version: 3.3.2 1.6727665901497366e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727665901498337e+09 INFO runner Operator Built By: jonathanknight 1.6727665901498716e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727665901498966e+09 INFO runner Go Version: go1.19.2 1.6727665901499205e+09 INFO runner Go OS/Arch: linux/amd64 1.6727665901501486e+09 INFO net-test Starting test {\"Name\": \"Simple Client\"} 1.6727665901501985e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} 1.6727665901573336e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"net-test-coherence-server.coh-test.svc-9612\", \"Port\": 9612} We can see that the test client successfully connected to the Coherence cluster member simulator on port 9612. The test Job can then be deleted: kubectl -n monitoring delete -f examples/095_network_policies/manifests/net-test-client.yaml Test with Network Policies All the above tests ran successfully without any network policies. We can now start to apply policies and re-run the tests to see what happens. In a secure environment we would start with a policy that blocks all access and then gradually open up required ports. We can apply the deny-all.yaml policy and then re-run the tests. 
We should apply the policy to both of the namespaces we are using in this example: kubectl -n coherence apply -f examples/095_network_policies/manifests/deny-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/deny-all.yaml Now, re-run the Operator test client: kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) 1.6727671834237397e+09 INFO runner Operator Version: 3.3.2 1.6727671834238796e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727671834239576e+09 INFO runner Operator Built By: jonathanknight 1.6727671834240365e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727671834240875e+09 INFO runner Go Version: go1.19.2 1.6727671834241736e+09 INFO runner Go OS/Arch: linux/amd64 1.6727671834244306e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727671834245417e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727672134268515e+09 INFO net-test Testing connectivity FAILED {\"PortName\": \"K8s API Server\", \"Error\": \"Get \\\"https://10.96.0.1:443/version?timeout=32s\\\": dial tcp 10.96.0.1:443: i/o timeout\"} 1.6727672134269848e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727672234281697e+09 INFO net-test Testing connectivity FAILED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676, \"Error\": \"dial tcp: lookup net-test-coherence-server.coh-test.svc: i/o timeout\"} We can see that the test client failed to connect to the Kubernetes API server and failed to connect to the Coherence cluster health port. This means the deny-all policy is working. We can now apply the various polices to fix the test kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-k8s-api-server.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-cluster-member-egress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-operator-rest-ingress.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/allow-webhook-ingress-from-all.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-dns.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-cluster-member-operator-access.yaml kubectl -n coh-test apply -f examples/095_network_policies/manifests/allow-metrics-ingress.yaml Now, delete and re-run the Operator test client: kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator.yaml kubectl -n coherence apply -f examples/095_network_policies/manifests/net-test-operator.yaml and check the result: kubectl -n coherence logs $(kubectl -n coherence get pod -l 'coherenceNetTest=operator-client' -o name) Now with the policies applied the test should have passed. 
1.6727691273634596e+09 INFO runner Operator Version: 3.3.2 1.6727691273635025e+09 INFO runner Operator Build Date: 2023-01-03T12:25:58Z 1.6727691273635256e+09 INFO runner Operator Built By: jonathanknight 1.6727691273635616e+09 INFO runner Operator Git Commit: c8118585b8f3d72b083ab1209211bcea364c85c5 1.6727691273637156e+09 INFO runner Go Version: go1.19.2 1.6727691273637407e+09 INFO runner Go OS/Arch: linux/amd64 1.6727691273639407e+09 INFO net-test Starting test {\"Name\": \"Operator Simulator\"} 1.6727691273639877e+09 INFO net-test Testing connectivity {\"PortName\": \"K8s API Server\"} 1.6727691273857167e+09 INFO net-test Testing connectivity PASSED {\"PortName\": \"K8s API Server\", \"Version\": \"v1.24.7\"} 1.6727691273858056e+09 INFO net-test Testing connectivity {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} 1.6727691273933685e+09 INFO net-test Testing connectivity PASSED {\"Host\": \"net-test-coherence-server.coh-test.svc\", \"PortName\": \"Health\", \"Port\": 6676} The other tests can also be re-run and should also pass. Clean-Up Once the tests are completed, the test servers and Jobs can be deleted. kubectl -n coherence delete -f examples/095_network_policies/manifests/net-test-operator-server.yaml kubectl -n coh-test delete -f examples/095_network_policies/manifests/net-test-coherence-server.yaml ", "title": "Using Network Policies" }, { @@ -1337,7 +1337,7 @@ }, { "location": "/examples/015_simple_image/README", - "text": " To build the image using JIB we need to add the JIB plugin to the project. In the pom.xml file we add JIB to the plugins section. <build> <plugins> <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>3.4.0</version> </plugin> </plugins> </build> In the build.gradle file we add JIB to the plugins section. plugins { id 'java' id 'com.google.cloud.tools.jib' version '3.4.0' } ", + "text": " To build the image using JIB we need to add the JIB plugin to the project. In the pom.xml file we add JIB to the plugins section. <build> <plugins> <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>3.4.1</version> </plugin> </plugins> </build> In the build.gradle file we add JIB to the plugins section. plugins { id 'java' id 'com.google.cloud.tools.jib' version '3.4.0' } ", "title": "Add the JIB Plugin" }, { @@ -1372,7 +1372,7 @@ }, { "location": "/examples/015_simple_image/README", - "text": " This example shows how to build a simple Coherence server image using JIB with either Maven or Gradle. When building with Maven the project uses the JIB Maven Plugin . When building with Gradle the project uses the JIB Gradle Plugin . The Coherence Operator has out of the box support for images built with JIB, for example it can automatically detect the class path to use and run the correct main class. This simple application does not actually contain any code, a real application would obviously contain code and other resources. Tip The complete source code for this example is in the Coherence Operator GitHub repository. Add Dependencies To build a Coherence application there will obviously be at a minimum a dependency on coherence.jar . Optionally we can also add dependencies on other Coherence modules. In this example we’re going to add json support to the application by adding a dependency on coherence-json . In the example we use the coherence-bom which ensures that we have consistent use of other Coherence modules. 
In the pom.xml we have a dependencyManagement section. <dependencyManagement> <dependencies> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence-bom</artifactId> <version>${coherence.version}</version> <type>pom</type> <scope>import</scope> </dependency> </dependencies> </dependencyManagement> In the build.gradle file we add the bom as a platform dependency. dependencies { implementation platform(\"com.oracle.coherence.ce:coherence-bom:22.06.7\") We can then add the coherence and coherence-json modules as dependencies <dependencies> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence</artifactId> </dependency> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence-json</artifactId> </dependency> </dependencies> In the build.gradle file we add the bom as a platform dependency. dependencies { implementation platform(\"com.oracle.coherence.ce:coherence-bom:22.06.7\") implementation \"com.oracle.coherence.ce:coherence\" implementation \"com.oracle.coherence.ce:coherence-json\" } Add the JIB Plugin To build the image using JIB we need to add the JIB plugin to the project. In the pom.xml file we add JIB to the plugins section. <build> <plugins> <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>3.4.0</version> </plugin> </plugins> </build> In the build.gradle file we add JIB to the plugins section. plugins { id 'java' id 'com.google.cloud.tools.jib' version '3.4.0' } Configure the JIB Plugin Now we can configure the JIB plugin with the properties specific to our image. In this example the configuration is very simple, the JIB plugin documentation shows many more options. We are going to set the following options: * The name and tags for the image we will build. * The main class that we will run as the entry point to the image - in this case com.tangosol.net.Coherence . * The base image. In this example we will us a distroless Java 11 image. A distroless image is more secure as it contains nothing more than core linux and a JRE. There is no shell or other tools to introduce CVEs. The downside of this is that there is no shell, so you cannot exec into the running container, or use a shell script as an entry point. If you don;t need those things a distroless image is a great choice. Maven Configuration In the pom.xml file we configure the plugin where it is declared in the plugins section: <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>${version.plugin.jib}</version> <configuration> <from> <image>gcr.io/distroless/java11-debian11</image> </from> <to> <image>${project.artifactId}</image> <tags> <tag>${project.version}</tag> <tag>latest</tag> </tags> </to> <container> <mainClass>com.tangosol.net.Coherence</mainClass> <format>OCI</format> </container> </configuration> </plugin> The base image will be gcr.io/distroless/java11-debian11 The image name is set to the Maven module name using the property ${project.artifactId} There will be two tags for the image, latest and the project version taken from the ${project.version} property. 
The main class to use when the image is run is set to com.tangosol.net.Coherence The image type is set to OCI Gradle Configuration In the build.gradle file we configure JIB in the jib section: jib { from { image = 'gcr.io/distroless/java11-debian11' } to { image = \"${project.name}\" tags = [\"${version}\", 'latest'] } container { mainClass = 'com.tangosol.net.Coherence' format = 'OCI' } } The base image will be gcr.io/distroless/java11-debian11 The image name is set to the Maven module name using the property ${project.artifactId} There will be two tags for the image, latest and the project version taken from the ${project.version} property. The main class to use when the image is run is set to com.tangosol.net.Coherence The image type is set to OCI Build the Image To create the server image run the relevant commands as documented in the JIB plugin documentation. In this case we’re going to build the image using Docker, although JIB offers other alternatives. Using Maven we run: ./mvnw compile jib:dockerBuild Using Gradle we run: ./gradlew compileJava jibDockerBuild The command above will create an image named simple-coherence with two tags, latest and 1.0.0 . Listing the local images should show the new images. $ docker images | grep simple simple-coherence 1.0.0 1613cd3b894e 51 years ago 227MB simple-coherence latest 1613cd3b894e 51 years ago 227MB Run the Image The image just built can be run using Docker (or your chosen container tool). In this example we’ll run it interactively, just to prove it runs and starts Coherence. docker run -it --rm simple-coherence:latest The console output should display Coherence starting and finally show the Coherence service list, which will look something like this: Services ( ClusterService{Name=Cluster, State=(SERVICE_STARTED, STATE_JOINED), Id=0, OldestMemberId=1} TransportService{Name=TransportService, State=(SERVICE_STARTED), Id=1, OldestMemberId=1} InvocationService{Name=Management, State=(SERVICE_STARTED), Id=2, OldestMemberId=1} PartitionedCache{Name=$SYS:Config, State=(SERVICE_STARTED), Id=3, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} PartitionedCache{Name=PartitionedCache, State=(SERVICE_STARTED), Id=4, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} PartitionedCache{Name=PartitionedTopic, State=(SERVICE_STARTED), Id=5, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} ProxyService{Name=Proxy, State=(SERVICE_STARTED), Id=6, OldestMemberId=1} ) Press ctrl-C to exit the container, the --rm option we used above wil automatically delete the stopped container. We now have a simple Coherence image we can use in other examples and when trying out the Coherence Operator. Configuring the Image at Runtime With recent Coherence versions, Coherence configuration items that can be set using system properties prefixed with coherence. can also be set using environment variables. This makes it simple to set those properties when running containers because environment variables can be set from the commandline. To set a property the system property name needs to be converted to an environment variable name. This is done by converting the name to uppercase and replacing dots ('.') with underscores ('_'). For example, to set the cluster name we would set the coherence.cluster system property. 
To run the image and set cluster name with an environment variable we convert coherence.cluster to COHERENCE_CLUSTER and run: docker run -it --rm -e COHERENCE_CLUSTER=my-cluster simple-coherence:latest This is much simpler than trying to change the Java commandline the image entrypoint uses. ", + "text": " This example shows how to build a simple Coherence server image using JIB with either Maven or Gradle. When building with Maven the project uses the JIB Maven Plugin . When building with Gradle the project uses the JIB Gradle Plugin . The Coherence Operator has out of the box support for images built with JIB, for example it can automatically detect the class path to use and run the correct main class. This simple application does not actually contain any code, a real application would obviously contain code and other resources. Tip The complete source code for this example is in the Coherence Operator GitHub repository. Add Dependencies To build a Coherence application there will obviously be at a minimum a dependency on coherence.jar . Optionally we can also add dependencies on other Coherence modules. In this example we’re going to add json support to the application by adding a dependency on coherence-json . In the example we use the coherence-bom which ensures that we have consistent use of other Coherence modules. In the pom.xml we have a dependencyManagement section. <dependencyManagement> <dependencies> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence-bom</artifactId> <version>${coherence.version}</version> <type>pom</type> <scope>import</scope> </dependency> </dependencies> </dependencyManagement> In the build.gradle file we add the bom as a platform dependency. dependencies { implementation platform(\"com.oracle.coherence.ce:coherence-bom:22.06.7\") We can then add the coherence and coherence-json modules as dependencies <dependencies> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence</artifactId> </dependency> <dependency> <groupId>com.oracle.coherence.ce</groupId> <artifactId>coherence-json</artifactId> </dependency> </dependencies> In the build.gradle file we add the bom as a platform dependency. dependencies { implementation platform(\"com.oracle.coherence.ce:coherence-bom:22.06.7\") implementation \"com.oracle.coherence.ce:coherence\" implementation \"com.oracle.coherence.ce:coherence-json\" } Add the JIB Plugin To build the image using JIB we need to add the JIB plugin to the project. In the pom.xml file we add JIB to the plugins section. <build> <plugins> <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>3.4.1</version> </plugin> </plugins> </build> In the build.gradle file we add JIB to the plugins section. plugins { id 'java' id 'com.google.cloud.tools.jib' version '3.4.0' } Configure the JIB Plugin Now we can configure the JIB plugin with the properties specific to our image. In this example the configuration is very simple, the JIB plugin documentation shows many more options. We are going to set the following options: * The name and tags for the image we will build. * The main class that we will run as the entry point to the image - in this case com.tangosol.net.Coherence . * The base image. In this example we will us a distroless Java 11 image. A distroless image is more secure as it contains nothing more than core linux and a JRE. There is no shell or other tools to introduce CVEs. 
The downside of this is that there is no shell, so you cannot exec into the running container, or use a shell script as an entry point. If you don’t need those things a distroless image is a great choice. Maven Configuration In the pom.xml file we configure the plugin where it is declared in the plugins section: <plugin> <groupId>com.google.cloud.tools</groupId> <artifactId>jib-maven-plugin</artifactId> <version>${version.plugin.jib}</version> <configuration> <from> <image>gcr.io/distroless/java11-debian11</image> </from> <to> <image>${project.artifactId}</image> <tags> <tag>${project.version}</tag> <tag>latest</tag> </tags> </to> <container> <mainClass>com.tangosol.net.Coherence</mainClass> <format>OCI</format> </container> </configuration> </plugin> The base image will be gcr.io/distroless/java11-debian11 The image name is set to the Maven module name using the property ${project.artifactId} There will be two tags for the image, latest and the project version taken from the ${project.version} property. The main class to use when the image is run is set to com.tangosol.net.Coherence The image type is set to OCI Gradle Configuration In the build.gradle file we configure JIB in the jib section: jib { from { image = 'gcr.io/distroless/java11-debian11' } to { image = \"${project.name}\" tags = [\"${version}\", 'latest'] } container { mainClass = 'com.tangosol.net.Coherence' format = 'OCI' } } The base image will be gcr.io/distroless/java11-debian11 The image name is set to the Maven module name using the property ${project.artifactId} There will be two tags for the image, latest and the project version taken from the ${project.version} property. The main class to use when the image is run is set to com.tangosol.net.Coherence The image type is set to OCI Build the Image To create the server image run the relevant commands as documented in the JIB plugin documentation. In this case we’re going to build the image using Docker, although JIB offers other alternatives. Using Maven we run: ./mvnw compile jib:dockerBuild Using Gradle we run: ./gradlew compileJava jibDockerBuild The command above will create an image named simple-coherence with two tags, latest and 1.0.0 . Listing the local images should show the new images. $ docker images | grep simple simple-coherence 1.0.0 1613cd3b894e 51 years ago 227MB simple-coherence latest 1613cd3b894e 51 years ago 227MB Run the Image The image just built can be run using Docker (or your chosen container tool). In this example we’ll run it interactively, just to prove it runs and starts Coherence.
docker run -it --rm simple-coherence:latest The console output should display Coherence starting and finally show the Coherence service list, which will look something like this: Services ( ClusterService{Name=Cluster, State=(SERVICE_STARTED, STATE_JOINED), Id=0, OldestMemberId=1} TransportService{Name=TransportService, State=(SERVICE_STARTED), Id=1, OldestMemberId=1} InvocationService{Name=Management, State=(SERVICE_STARTED), Id=2, OldestMemberId=1} PartitionedCache{Name=$SYS:Config, State=(SERVICE_STARTED), Id=3, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} PartitionedCache{Name=PartitionedCache, State=(SERVICE_STARTED), Id=4, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} PartitionedCache{Name=PartitionedTopic, State=(SERVICE_STARTED), Id=5, OldestMemberId=1, LocalStorage=enabled, PartitionCount=257, BackupCount=1, AssignedPartitions=257, BackupPartitions=0, CoordinatorId=1} ProxyService{Name=Proxy, State=(SERVICE_STARTED), Id=6, OldestMemberId=1} ) Press ctrl-C to exit the container, the --rm option we used above wil automatically delete the stopped container. We now have a simple Coherence image we can use in other examples and when trying out the Coherence Operator. Configuring the Image at Runtime With recent Coherence versions, Coherence configuration items that can be set using system properties prefixed with coherence. can also be set using environment variables. This makes it simple to set those properties when running containers because environment variables can be set from the commandline. To set a property the system property name needs to be converted to an environment variable name. This is done by converting the name to uppercase and replacing dots ('.') with underscores ('_'). For example, to set the cluster name we would set the coherence.cluster system property. To run the image and set cluster name with an environment variable we convert coherence.cluster to COHERENCE_CLUSTER and run: docker run -it --rm -e COHERENCE_CLUSTER=my-cluster simple-coherence:latest This is much simpler than trying to change the Java commandline the image entrypoint uses. ", "title": "Example Coherence Image using JIB" }, { @@ -1402,7 +1402,7 @@ }, { "location": "/docs/metrics/020_metrics", - "text": " From version 3.4.0 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. This prefix was deprecated a number of releases ago and the default, of legacy metrics, in Coherence and will be removed in the most recent Coherence releases after this Operator release. If you are using a Coherence cluster version you are using has not yet changed this property, you may see no metrics in the Grafana dashboards. To change your cluster to not use legacy names, set the environment variable COHERENCE_METRICS_LEGACY_NAMES to false in your yaml. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: env: - name: \"COHERENCE_METRICS_LEGACY_NAMES\" value: \"false\" coherence: ... has not set \"coherence.metrics.legacy.names=false\" ", + "text": " From version 3.4.1 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. 
This prefix was deprecated a number of releases ago and the default, of legacy metrics, in Coherence and will be removed in the most recent Coherence releases after this Operator release. If you are using a Coherence cluster version you are using has not yet changed this property, you may see no metrics in the Grafana dashboards. To change your cluster to not use legacy names, set the environment variable COHERENCE_METRICS_LEGACY_NAMES to false in your yaml. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: env: - name: \"COHERENCE_METRICS_LEGACY_NAMES\" value: \"false\" coherence: ... has not set \"coherence.metrics.legacy.names=false\" ", "title": "Important Note Regarding Prometheus Metrics Prefix" }, { @@ -1427,7 +1427,7 @@ }, { "location": "/docs/metrics/020_metrics", - "text": " Since version 12.2.1.4 Coherence has had the ability to expose a http endpoint that can be used to scrape metrics. This would typically be used to expose metrics to something like Prometheus. The default metrics endpoint is disabled by default in Coherence clusters but can be enabled and configured by setting the relevant fields in the Coherence CRD. If your Coherence version is before CE 21.12.1 this example assumes that your application has included the coherence-metrics module as a dependency. See the Coherence product documentation for more details on enabling metrics in your application. The example below shows how to enable and access Coherence metrics. Once the metrics port has been exposed, for example via a load balancer or port-forward command, the metrics endpoint is available at http://host:port/metrics . See the Using Coherence Metrics documentation for full details on the available metrics. Important Note Regarding Prometheus Metrics Prefix From version 3.4.0 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. This prefix was deprecated a number of releases ago and the default, of legacy metrics, in Coherence and will be removed in the most recent Coherence releases after this Operator release. If you are using a Coherence cluster version you are using has not yet changed this property, you may see no metrics in the Grafana dashboards. To change your cluster to not use legacy names, set the environment variable COHERENCE_METRICS_LEGACY_NAMES to false in your yaml. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: env: - name: \"COHERENCE_METRICS_LEGACY_NAMES\" value: \"false\" coherence: ... has not set \"coherence.metrics.legacy.names=false\" Deploy Coherence with Metrics Enabled To deploy a Coherence resource with metrics enabled and exposed on a port, the simplest yaml would look like this: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: coherence: metrics: enabled: true ports: - name: metrics Setting the coherence.metrics.enabled field to true will enable metrics To expose metrics via a Service it is added to the ports list. The metrics port is a special case where the port number is optional so in this case metrics will bind to the default port 9612 . 
(see Exposing Ports for details) Expose Metrics on a Different Port To expose metrics on a different port the alternative port value can be set in the coherence.metrics section, for example: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: coherence: metrics: enabled: true port: 8080 ports: - name: metrics metrics will now be exposed on port 8080 Port-forward the Metrics Port After installing the basic metrics-cluster.yaml from the first example above there would be a three member Coherence cluster installed into Kubernetes. For example, the cluster can be installed with kubectl kubectl -n coherence-test create -f metrics-cluster.yaml coherence.coherence.oracle.com/metrics-cluster created The kubectl CLI can be used to list Pods for the cluster: kubectl -n coherence-test get pod -l coherenceCluster=metrics-cluster NAME READY STATUS RESTARTS AGE metrics-cluster-0 1/1 Running 0 36s metrics-cluster-1 1/1 Running 0 36s metrics-cluster-2 1/1 Running 0 36s In a test or development environment the simplest way to reach an exposed port is to use the kubectl port-forward command. For example to connect to the first Pod in the deployment: kubectl -n coherence-test port-forward metrics-cluster-0 9612:9612 Forwarding from [::1]:9612 -> 9612 Forwarding from 127.0.0.1:9612 -> 9612 Access the Metrics Endpoint Now that a port has been forwarded from localhost to a Pod in the cluster the metrics endpoint can be accessed. Issue the following curl command to access the REST endpoint: curl http://127.0.0.1:9612/metrics ", + "text": " Since version 12.2.1.4 Coherence has had the ability to expose a http endpoint that can be used to scrape metrics. This would typically be used to expose metrics to something like Prometheus. The default metrics endpoint is disabled by default in Coherence clusters but can be enabled and configured by setting the relevant fields in the Coherence CRD. If your Coherence version is before CE 21.12.1 this example assumes that your application has included the coherence-metrics module as a dependency. See the Coherence product documentation for more details on enabling metrics in your application. The example below shows how to enable and access Coherence metrics. Once the metrics port has been exposed, for example via a load balancer or port-forward command, the metrics endpoint is available at http://host:port/metrics . See the Using Coherence Metrics documentation for full details on the available metrics. Important Note Regarding Prometheus Metrics Prefix From version 3.4.1 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. This prefix was deprecated a number of releases ago and the default, of legacy metrics, in Coherence and will be removed in the most recent Coherence releases after this Operator release. If you are using a Coherence cluster version you are using has not yet changed this property, you may see no metrics in the Grafana dashboards. To change your cluster to not use legacy names, set the environment variable COHERENCE_METRICS_LEGACY_NAMES to false in your yaml. apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: env: - name: \"COHERENCE_METRICS_LEGACY_NAMES\" value: \"false\" coherence: ... 
has not set \"coherence.metrics.legacy.names=false\" Deploy Coherence with Metrics Enabled To deploy a Coherence resource with metrics enabled and exposed on a port, the simplest yaml would look like this: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: coherence: metrics: enabled: true ports: - name: metrics Setting the coherence.metrics.enabled field to true will enable metrics To expose metrics via a Service it is added to the ports list. The metrics port is a special case where the port number is optional so in this case metrics will bind to the default port 9612 . (see Exposing Ports for details) Expose Metrics on a Different Port To expose metrics on a different port the alternative port value can be set in the coherence.metrics section, for example: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: metrics-cluster spec: coherence: metrics: enabled: true port: 8080 ports: - name: metrics metrics will now be exposed on port 8080 Port-forward the Metrics Port After installing the basic metrics-cluster.yaml from the first example above there would be a three member Coherence cluster installed into Kubernetes. For example, the cluster can be installed with kubectl kubectl -n coherence-test create -f metrics-cluster.yaml coherence.coherence.oracle.com/metrics-cluster created The kubectl CLI can be used to list Pods for the cluster: kubectl -n coherence-test get pod -l coherenceCluster=metrics-cluster NAME READY STATUS RESTARTS AGE metrics-cluster-0 1/1 Running 0 36s metrics-cluster-1 1/1 Running 0 36s metrics-cluster-2 1/1 Running 0 36s In a test or development environment the simplest way to reach an exposed port is to use the kubectl port-forward command. For example to connect to the first Pod in the deployment: kubectl -n coherence-test port-forward metrics-cluster-0 9612:9612 Forwarding from [::1]:9612 -> 9612 Forwarding from 127.0.0.1:9612 -> 9612 Access the Metrics Endpoint Now that a port has been forwarded from localhost to a Pod in the cluster the metrics endpoint can be accessed. Issue the following curl command to access the REST endpoint: curl http://127.0.0.1:9612/metrics ", "title": "Publish Metrics" }, { @@ -1527,7 +1527,7 @@ }, { "location": "/examples/no-operator/04_istio/README", - "text": " For this example we need a simple client image that can be run with different configurations. Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.0 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . 
There will now be an imaged named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this example below. ", + "text": " For this example we need a simple client image that can be run with different configurations. Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.1 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . There will now be an imaged named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this example below. ", "title": "Build a Client Image" }, { @@ -1547,7 +1547,7 @@ }, { "location": "/examples/no-operator/04_istio/README", - "text": " If the clients are also inside the cluster they can be configured to connect using the StatefulSet headless service as the hostname for the proxy endpoints. There are two options for configuring Extend and Clients inside Kubernetes can also use the minimal Coherence NameService configuration where the StatefulSet service name is used as the client’s WKA address and the same cluster name is configured. Clients external to the Kubernetes cluster can be configured using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many, and they depend on the versions of Istio and Kubernetes being used. Build a Client Image For this example we need a simple client image that can be run with different configurations. Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.0 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. 
curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . There will now be an imaged named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this example below. Using the Coherence NameService Configuration The minimal configuration in a client’s cache configuration file is shown below. This configuration will use the Coherence NameService to look up the endpoints for the Extend Proxy services running in the Coherence cluster. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> For the NameService to work in Kubernetes, the client must be configured with the same cluster name, the same well known addresses and same cluster port as the server. When using Istio the server’s cluster port, local port and Extend port should be exposed on the StatefulSet headless service. The client’s well known address is then set to the qualified Kubernetes DNS name for the server’s StatefulSet headless service. These can all be set using environment variables in the yaml for the client. For example, assuming the client will connect to the Coherence cluster configured in the StatefulSet above: env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" The cluster name is set to test-cluster the same as the StatefulSet The COHERENCE_WKA value is set to the DNS name of the StatefulSet headless service, which has the format <service-name>.<namespace>.svc so in this case storage-headless.coherence.svc Run an Extend Client Pod Using the coherence-client:1.0.0 image created above, we can run a simple Coherence client Pod. apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" - name: COHERENCE_CLIENT value: \"remote\" The container image is set to the client image built above coherence-client:1.0.0 The command line the container will run is /files/runner sleep 15m which will just sleep for 15 minutes The Coherence cluster name is set to the same name as the server deployed above in the StatefulSet yaml The WKA address is set to the StatefulSet’s headless service name storage-headless.coherence.svc For this example the COHERENCE_CLIENT which sets the default cache configuration file to run as an Extend client, using the NameService to look up the proxies. We can deploy the client into Kubernetes kubectl -n coherence apply -f extend-client-pod.yaml We deployed the client into the same namespace as the cluster, we could easily have deployed it to another namespace. If we list the Pods we will see the cluster and the client. All Pods has two containers, one being the Istio side-car. 
$ k -n coherence get pod NAME READY STATUS RESTARTS AGE storage-0 2/2 Running 0 105m storage-1 2/2 Running 0 105m storage-2 2/2 Running 0 105m client 2/2 Running 0 8m27s Now we can exec into the Pod and start a Coherence QueryPlus console session using the following command: kubectl -n coherence exec -it client -- /files/runner queryplus The QueryPlus session will start and eventually display the CohQL> prompt: Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see the traffic was TCP over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Run a gRPC Client Pod We can run the same image as a gRPC client. For this example, instead of the NameService we will configure Coherence to apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLIENT value: \"grpc-fixed\" - name: COHERENCE_GRPC_ADDRESS value: \"storage-headless.coherence.svc\" - name: COHERENCE_GRPC_PORT value: \"1408\" We can now deploy the gRPC client Pod kubectl -n coherence delete -f grpc-client-pod.yaml And exec into the Pod to create a QueryPlus session. kubectl -n coherence exec -it client -- /files/runner queryplus We can run the same create cache test and select * from test command that we ran above to connect the client to the cluster. This time the client should be connecting over gRPC. If we now look at the Kiali dashboard we can see again that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see that this time the traffic was gRPC over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml ", + "text": " If the clients are also inside the cluster they can be configured to connect using the StatefulSet headless service as the hostname for the proxy endpoints. There are two options for configuring Extend and Clients inside Kubernetes can also use the minimal Coherence NameService configuration where the StatefulSet service name is used as the client’s WKA address and the same cluster name is configured. Clients external to the Kubernetes cluster can be configured using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many, and they depend on the versions of Istio and Kubernetes being used. 
Build a Client Image For this example we need a simple client image that can be run with different configurations. Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.1 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . There will now be an image named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this example below. Using the Coherence NameService Configuration The minimal configuration in a client’s cache configuration file is shown below. This configuration will use the Coherence NameService to look up the endpoints for the Extend Proxy services running in the Coherence cluster. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> For the NameService to work in Kubernetes, the client must be configured with the same cluster name, the same well known addresses and same cluster port as the server. When using Istio the server’s cluster port, local port and Extend port should be exposed on the StatefulSet headless service. The client’s well known address is then set to the qualified Kubernetes DNS name for the server’s StatefulSet headless service. These can all be set using environment variables in the yaml for the client. For example, assuming the client will connect to the Coherence cluster configured in the StatefulSet above: env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" The cluster name is set to test-cluster the same as the StatefulSet The COHERENCE_WKA value is set to the DNS name of the StatefulSet headless service, which has the format <service-name>.<namespace>.svc so in this case storage-headless.coherence.svc Run an Extend Client Pod Using the coherence-client:1.0.0 image created above, we can run a simple Coherence client Pod.
apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" - name: COHERENCE_CLIENT value: \"remote\" The container image is set to the client image built above coherence-client:1.0.0 The command line the container will run is /files/runner sleep 15m which will just sleep for 15 minutes The Coherence cluster name is set to the same name as the server deployed above in the StatefulSet yaml The WKA address is set to the StatefulSet’s headless service name storage-headless.coherence.svc For this example the COHERENCE_CLIENT is set to remote , which sets the default cache configuration file to run as an Extend client, using the NameService to look up the proxies. We can deploy the client into Kubernetes kubectl -n coherence apply -f extend-client-pod.yaml We deployed the client into the same namespace as the cluster; we could easily have deployed it to another namespace. If we list the Pods we will see the cluster and the client. All Pods have two containers, one being the Istio side-car. $ k -n coherence get pod NAME READY STATUS RESTARTS AGE storage-0 2/2 Running 0 105m storage-1 2/2 Running 0 105m storage-2 2/2 Running 0 105m client 2/2 Running 0 8m27s Now we can exec into the Pod and start a Coherence QueryPlus console session using the following command: kubectl -n coherence exec -it client -- /files/runner queryplus The QueryPlus session will start and eventually display the CohQL> prompt: Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see the traffic was TCP over mTLS. To exit from the CohQL> prompt type the bye command. Then delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Run a gRPC Client Pod We can run the same image as a gRPC client. For this example, instead of the NameService we will configure Coherence to use a fixed gRPC endpoint. apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLIENT value: \"grpc-fixed\" - name: COHERENCE_GRPC_ADDRESS value: \"storage-headless.coherence.svc\" - name: COHERENCE_GRPC_PORT value: \"1408\" We can now deploy the gRPC client Pod kubectl -n coherence apply -f grpc-client-pod.yaml And exec into the Pod to create a QueryPlus session. kubectl -n coherence exec -it client -- /files/runner queryplus We can run the same create cache test and select * from test command that we ran above to connect the client to the cluster.
This time the client should be connecting over gRPC. If we now look at the Kiali dashboard we can see again that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see that this time the traffic was gRPC over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml ", "title": "Clients Inside Kubernetes" }, { @@ -1557,7 +1557,7 @@ }, { "location": "/examples/no-operator/04_istio/README", - "text": " Coherence clients (Extend or gRPC) can be configured to connect to the Coherence cluster. Extend Proxy Configuration To work correctly with Istio a Coherence Extend proxy in the server’s cache configuration file must be configured to use a fixed port. For example, the XML snippet below configures the proxy to bind to all interfaces ( 0.0.0.0 ) on port 20000. <proxy-scheme> <service-name>Proxy</service-name> <acceptor-config> <tcp-acceptor> <local-address> <address system-property=\"coherence.extend.address\">0.0.0.0</address> <port system-property=\"coherence.extend.port\">20000</port> </local-address> </tcp-acceptor> </acceptor-config> <autostart>true</autostart> </proxy-scheme> The port could be changed by setting the COHERENCE_EXTEND_PORT environment variable in the server yaml. spec: containers: - name: coherence image: ghcr.io/oracle/coherence-ce:22.06.7 env: - name: COHERENCE_EXTEND_PORT value: \"20001\" The Extend port should be exposed on the StatefulSet (as shown in the StatefulSet yaml above) and on the StatefulSet headless service so that clients can discover it and connect to it (as shown in the Service yaml above). Tip The default cache configuration file used by Coherence, and used in the Coherence images published on GitHub, contains an Extend Proxy service that uses the COHERENCE_EXTEND_PORT environment variable to set the port. gRPC Proxy Configuration The Coherence gRPC proxy binds to an ephemeral port by default. This port can be changed by using the COHERENCE_GRPC_SERVER_PORT environment variable; spec: containers: - name: coherence image: ghcr.io/oracle/coherence-ce:22.06.7 env: - name: COHERENCE_GRPC_SERVER_PORT value: \"1408\" Tip The default configuration used by Coherence images published on GitHub sets the gRPC port to 1408. Once the server StatefulSet and Service have been properly configured the clients can be configured. The options available for this will depend on where the client will run. Clients Inside Kubernetes If the clients are also inside the cluster they can be configured to connect using the StatefulSet headless service as the hostname for the proxy endpoints. There are two options for configuring Extend and Clients inside Kubernetes can also use the minimal Coherence NameService configuration where the StatefulSet service name is used as the client’s WKA address and the same cluster name is configured. Clients external to the Kubernetes cluster can be configured using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many, and they depend on the versions of Istio and Kubernetes being used. Build a Client Image For this example we need a simple client image that can be run with different configurations. 
Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.0 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . There will now be an imaged named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this example below. Using the Coherence NameService Configuration The minimal configuration in a client’s cache configuration file is shown below. This configuration will use the Coherence NameService to look up the endpoints for the Extend Proxy services running in the Coherence cluster. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> For the NameService to work in Kubernetes, the client must be configured with the same cluster name, the same well known addresses and same cluster port as the server. When using Istio the server’s cluster port, local port and Extend port should be exposed on the StatefulSet headless service. The client’s well known address is then set to the qualified Kubernetes DNS name for the server’s StatefulSet headless service. These can all be set using environment variables in the yaml for the client. For example, assuming the client will connect to the Coherence cluster configured in the StatefulSet above: env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" The cluster name is set to test-cluster the same as the StatefulSet The COHERENCE_WKA value is set to the DNS name of the StatefulSet headless service, which has the format <service-name>.<namespace>.svc so in this case storage-headless.coherence.svc Run an Extend Client Pod Using the coherence-client:1.0.0 image created above, we can run a simple Coherence client Pod. 
apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" - name: COHERENCE_CLIENT value: \"remote\" The container image is set to the client image built above coherence-client:1.0.0 The command line the container will run is /files/runner sleep 15m which will just sleep for 15 minutes The Coherence cluster name is set to the same name as the server deployed above in the StatefulSet yaml The WKA address is set to the StatefulSet’s headless service name storage-headless.coherence.svc For this example the COHERENCE_CLIENT which sets the default cache configuration file to run as an Extend client, using the NameService to look up the proxies. We can deploy the client into Kubernetes kubectl -n coherence apply -f extend-client-pod.yaml We deployed the client into the same namespace as the cluster, we could easily have deployed it to another namespace. If we list the Pods we will see the cluster and the client. All Pods has two containers, one being the Istio side-car. $ k -n coherence get pod NAME READY STATUS RESTARTS AGE storage-0 2/2 Running 0 105m storage-1 2/2 Running 0 105m storage-2 2/2 Running 0 105m client 2/2 Running 0 8m27s Now we can exec into the Pod and start a Coherence QueryPlus console session using the following command: kubectl -n coherence exec -it client -- /files/runner queryplus The QueryPlus session will start and eventually display the CohQL> prompt: Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see the traffic was TCP over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Run a gRPC Client Pod We can run the same image as a gRPC client. For this example, instead of the NameService we will configure Coherence to apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLIENT value: \"grpc-fixed\" - name: COHERENCE_GRPC_ADDRESS value: \"storage-headless.coherence.svc\" - name: COHERENCE_GRPC_PORT value: \"1408\" We can now deploy the gRPC client Pod kubectl -n coherence delete -f grpc-client-pod.yaml And exec into the Pod to create a QueryPlus session. kubectl -n coherence exec -it client -- /files/runner queryplus We can run the same create cache test and select * from test command that we ran above to connect the client to the cluster. 
This time the client should be connecting over gRPC. If we now look at the Kiali dashboard we can see again that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see that this time the traffic was gRPC over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Clients Outside Kubernetes When connecting Coherence Extend or gRPC clients from outside Kubernetes, the Coherence NameService cannot be used by clients to look up the endpoints. The clients must be configured with fixed endpoints using the hostnames and ports of the configured ingress or gateway services. Exactly how this is done will depend on the versions of Istio and Kubernetes being used and whether Ingress or the Kubernetes Gateway API is used. The different options available make it impossible to build an example that can cover all these scenarios. ", + "text": " Coherence clients (Extend or gRPC) can be configured to connect to the Coherence cluster. Extend Proxy Configuration To work correctly with Istio a Coherence Extend proxy in the server’s cache configuration file must be configured to use a fixed port. For example, the XML snippet below configures the proxy to bind to all interfaces ( 0.0.0.0 ) on port 20000. <proxy-scheme> <service-name>Proxy</service-name> <acceptor-config> <tcp-acceptor> <local-address> <address system-property=\"coherence.extend.address\">0.0.0.0</address> <port system-property=\"coherence.extend.port\">20000</port> </local-address> </tcp-acceptor> </acceptor-config> <autostart>true</autostart> </proxy-scheme> The port could be changed by setting the COHERENCE_EXTEND_PORT environment variable in the server yaml. spec: containers: - name: coherence image: ghcr.io/oracle/coherence-ce:22.06.7 env: - name: COHERENCE_EXTEND_PORT value: \"20001\" The Extend port should be exposed on the StatefulSet (as shown in the StatefulSet yaml above) and on the StatefulSet headless service so that clients can discover it and connect to it (as shown in the Service yaml above). Tip The default cache configuration file used by Coherence, and used in the Coherence images published on GitHub, contains an Extend Proxy service that uses the COHERENCE_EXTEND_PORT environment variable to set the port. gRPC Proxy Configuration The Coherence gRPC proxy binds to an ephemeral port by default. This port can be changed by using the COHERENCE_GRPC_SERVER_PORT environment variable: spec: containers: - name: coherence image: ghcr.io/oracle/coherence-ce:22.06.7 env: - name: COHERENCE_GRPC_SERVER_PORT value: \"1408\" Tip The default configuration used by Coherence images published on GitHub sets the gRPC port to 1408. Once the server StatefulSet and Service have been properly configured the clients can be configured. The options available for this will depend on where the client will run. Clients Inside Kubernetes If the clients are also inside the cluster they can be configured to connect using the StatefulSet headless service as the hostname for the proxy endpoints. There are two options for configuring Extend and gRPC clients inside Kubernetes: they can be configured with fixed proxy endpoints, or they can use the minimal Coherence NameService configuration where the StatefulSet service name is used as the client’s WKA address and the same cluster name is configured. 
Clients external to the Kubernetes cluster can be configured using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many, and they depend on the versions of Istio and Kubernetes being used. Build a Client Image For this example we need a simple client image that can be run with different configurations. Instead of building an application we will use a Coherence Image from GitHub combined with the utilities from the Coherence Operator. The simple Dockerfile below is a multistage build file. It uses the Operator image as a \"builder\" and then the Coherence image as the base. Various utilities are copied from the Operator image into the base. FROM ghcr.io/oracle/coherence-operator:3.4.1 AS Builder FROM ghcr.io/oracle/coherence-ce:22.06.7 COPY --from=Builder /files /files COPY --from=Builder /files/lib/coherence-operator.jar /app/libs/coherence-operator.jar COPY coherence-java-client-22.06.7.jar /app/libs/coherence-java-client-22.06.7.jar ENTRYPOINT [\"files/runner\"] CMD [\"-h\"] As we are going to show both the Coherence Extend client and gRPC client we need to add the Coherence gRPC client jar. We can download this with curl to the same directory as the Dockerfile. curl -s https://repo1.maven.org/maven2/com/oracle/coherence/ce/coherence-java-client/22.06.7/coherence-java-client-22.06.7.jar \\ -o coherence-java-client-22.06.7.jar Build the image with the following command: docker build -t coherence-client:1.0.0 -f Dockerfile . There will now be an image named coherence-client:1.0.0 which can be pushed somewhere Kubernetes can see it. We will use this image in the examples below. Using the Coherence NameService Configuration The minimal configuration in a client’s cache configuration file is shown below. This configuration will use the Coherence NameService to look up the endpoints for the Extend Proxy services running in the Coherence cluster. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> For the NameService to work in Kubernetes, the client must be configured with the same cluster name, the same well known addresses and same cluster port as the server. When using Istio the server’s cluster port, local port and Extend port should be exposed on the StatefulSet headless service. The client’s well known address is then set to the qualified Kubernetes DNS name for the server’s StatefulSet headless service. These can all be set using environment variables in the yaml for the client. For example, assuming the client will connect to the Coherence cluster configured in the StatefulSet above: env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" The cluster name is set to test-cluster the same as the StatefulSet The COHERENCE_WKA value is set to the DNS name of the StatefulSet headless service, which has the format <service-name>.<namespace>.svc so in this case storage-headless.coherence.svc Run an Extend Client Pod Using the coherence-client:1.0.0 image created above, we can run a simple Coherence client Pod. 
apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLUSTER value: \"test-cluster\" - name: COHERENCE_WKA value: \"storage-headless.coherence.svc\" - name: COHERENCE_CLIENT value: \"remote\" The container image is set to the client image built above coherence-client:1.0.0 The command line the container will run is /files/runner sleep 15m which will just sleep for 15 minutes The Coherence cluster name is set to the same name as the server deployed above in the StatefulSet yaml The WKA address is set to the StatefulSet’s headless service name storage-headless.coherence.svc For this example COHERENCE_CLIENT is set to remote, which configures the default cache configuration file to run as an Extend client, using the NameService to look up the proxies. We can deploy the client into Kubernetes kubectl -n coherence apply -f extend-client-pod.yaml We deployed the client into the same namespace as the cluster, but we could easily have deployed it to another namespace. If we list the Pods we will see the cluster and the client. All Pods have two containers, one being the Istio side-car. $ k -n coherence get pod NAME READY STATUS RESTARTS AGE storage-0 2/2 Running 0 105m storage-1 2/2 Running 0 105m storage-2 2/2 Running 0 105m client 2/2 Running 0 8m27s Now we can exec into the Pod and start a Coherence QueryPlus console session using the following command: kubectl -n coherence exec -it client -- /files/runner queryplus The QueryPlus session will start and eventually display the CohQL> prompt: Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see the traffic was TCP over mTLS. To exit from the CohQL> prompt type the bye command. Then delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Run a gRPC Client Pod We can run the same image as a gRPC client. For this example, instead of the NameService, we will configure Coherence to use a fixed gRPC endpoint. apiVersion: v1 kind: Pod metadata: name: client labels: app: coherence-client version: 1.0.0 spec: containers: - name: coherence image: coherence-client:1.0.0 command: - /files/runner - sleep - \"15m\" env: - name: COHERENCE_CLIENT value: \"grpc-fixed\" - name: COHERENCE_GRPC_ADDRESS value: \"storage-headless.coherence.svc\" - name: COHERENCE_GRPC_PORT value: \"1408\" We can now deploy the gRPC client Pod kubectl -n coherence apply -f grpc-client-pod.yaml And exec into the Pod to create a QueryPlus session. kubectl -n coherence exec -it client -- /files/runner queryplus We can run the same create cache test and select * from test commands that we ran above to connect the client to the cluster. 
This time the client should be connecting over gRPC. If we now look at the Kiali dashboard we can see again that the client application has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. If we look at the Kiali dashboard traffic tab for the client application we can see that this time the traffic was gRPC over mTLS. To exit from the CohQL> prompt type the bye command. The delete the client Pod kubectl -n coherence delete -f extend-client-pod.yaml Clients Outside Kubernetes When connecting Coherence Extend or gRPC clients from outside Kubernetes, the Coherence NameService cannot be used by clients to look up the endpoints. The clients must be configured with fixed endpoints using the hostnames and ports of the configured ingress or gateway services. Exactly how this is done will depend on the versions of Istio and Kubernetes being used and whether Ingress or the Kubernetes Gateway API is used. The different options available make it impossible to build an example that can cover all these scenarios. ", "title": "Coherence Clients" }, { @@ -2432,7 +2432,7 @@ }, { "location": "/docs/installation/07_webhooks", - "text": " If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml ", + "text": " If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml ", "title": "Install Using Manifest File" }, { @@ -2442,7 +2442,7 @@ }, { "location": "/docs/installation/07_webhooks", - "text": " Assuming Kubernetes Cert Manager has been installed in the Kubernetes cluster then to use it for managing the web-hook certificates, the Operator needs to be installed with the CERT_TYPE environment variable set to cert-manager . The Operator will then detect the version of Cert Manager and automatically create the required self-signed Issuer and Certificate resources. Cert Manager will detect these and create the Secret . This may cause the operator Pod to re-start until the Secret has been created. Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . 
For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml Install Using Helm To set the certificate manager to use when installing the Helm chart, set the webhookCertType value: helm install \\ --namespace <namespace> \\ --set webhookCertType=cert-manager coherence-operator \\ coherence/coherence-operator The certificate manager will be set to cert-manager ", + "text": " Assuming Kubernetes Cert Manager has been installed in the Kubernetes cluster then to use it for managing the web-hook certificates, the Operator needs to be installed with the CERT_TYPE environment variable set to cert-manager . The Operator will then detect the version of Cert Manager and automatically create the required self-signed Issuer and Certificate resources. Cert Manager will detect these and create the Secret . This may cause the operator Pod to re-start until the Secret has been created. Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml Install Using Helm To set the certificate manager to use when installing the Helm chart, set the webhookCertType value: helm install \\ --namespace <namespace> \\ --set webhookCertType=cert-manager coherence-operator \\ coherence/coherence-operator The certificate manager will be set to cert-manager ", "title": "Cert Manager (Self-Signed)" }, { @@ -2477,7 +2477,7 @@ }, { "location": "/docs/installation/07_webhooks", - "text": " A web-hook requires certificates to be able to work in Kubernetes. By default, the operator will create and manage self-signed certificates for this purpose. These certificates are created using the Kubernetes certificate It is possible to use other certificates, either managed by the Kubernetes cert-manager or managed manually. The certificates should be stored in a Secret named coherence-webhook-server-cert in the same namespace that the operator has installed in. (although this name can be changed if required). This Secret must exist, or the operator wil fail to start. The Operator Helm chart will create this Secret when the Operator is managing its own self-signed certs, otherwise the Secret must be created manually or by an external certificate manager. Self-Signed Certificates This is the default option, the operator will create and manage a set of self-signed certificates. The Operator will update the Secret with its certificates and create the MutatingWebhookConfiguration and ValidatingWebhookConfiguration resources configured to use those certificates. 
Cert Manager (Self-Signed) Assuming Kubernetes Cert Manager has been installed in the Kubernetes cluster then to use it for managing the web-hook certificates, the Operator needs to be installed with the CERT_TYPE environment variable set to cert-manager . The Operator will then detect the version of Cert Manager and automatically create the required self-signed Issuer and Certificate resources. Cert Manager will detect these and create the Secret . This may cause the operator Pod to re-start until the Secret has been created. Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml Install Using Helm To set the certificate manager to use when installing the Helm chart, set the webhookCertType value: helm install \\ --namespace <namespace> \\ --set webhookCertType=cert-manager coherence-operator \\ coherence/coherence-operator The certificate manager will be set to cert-manager Manual Certificates If certificates will be managed some other way (for example by Cert Manager managing real certificates) then the CERT_TYPE environment variable should be set to manual . A Secret must exist in the namespace the operator will be installed into containing the CA certificate, certificate and key files that the operator will use to configure the web-hook. The files must exist with the names expected by the operator. The default name of the Secret expected by the operator is coherence-webhook-server-cert but this can be changed. The certificates in the Secret must be valid for the Service name that exposes the Coherence web-hook. The default format of the DNS used for the certificate CN (common name) is coherence-operator-webhook.<namespace>.svc where <namespace> is the namespace the operator is installed into. Additional names may also be configured using the different formats of Kubernetes Service DNS names. For example, if the Operator is installed into a namespace named coherence the Service DNS names would be: - coherence-operator-webhook.coherence - coherence-operator-webhook.coherence.svc - coherence-operator-webhook.coherence.svc.cluster.local An example of the format of the Secret is shown below: apiVersion: v1 kind: Secret metadata: name: coherence-webhook-server-cert type: Opaque data: ca.crt: ... # <base64 endocde CA certificate file> tls.crt: ... # <base64 endocde certificate file> tls.key: ... # <base64 endocde private key file> Warning If a Secret with the name specified in webhookCertSecret does not exist in the namespace the operator is being installed into then the operator Pod will not start as the Secret will be mounted as a volume in the operator Pod. Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . 
For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.3.5/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/manual/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml Install Using Helm To configure the operator to use manually managed certificates when installing the Helm chart, set the webhookCertType value. helm install \\ --namespace <namespace> \\ --set webhookCertType=manual \\ coherence-operator \\ coherence/coherence-operator The certificate manager will be set to manual and the operator will expect to find a Secret named coherence-webhook-server-cert To use manually managed certificates and store the keys and certs in a different secret, set the secret name using the webhookCertSecret value. helm install \\ --namespace <namespace> \\ --set webhookCertType=manual \\ --set webhookCertSecret=operator-certs \\ coherence-operator \\ coherence/coherence-operator The certificate manager will be set to manual The name of the secret is set to operator-certs The Coherence Operator will now expect to find the keys and certs in a Secret named operator-certs in the same namespace that the Operator is deployed into. Install the Operator Without Web-Hooks It is possible to start the Operator without it registering any web-hooks with the API server. Caution Running the Operator without web-hooks is not recommended. The admission web-hooks validate the Coherence resource yaml before it gets into the k8s cluster. Without the web-hooks, invalid yaml will be accepted by k8s and the Operator will then log errors when it tries to reconcile invalid yaml. Or worse, the Operator will create an invalid StatefulSet which will then fail to start. Install Using Manifest File If installing using the manifest yaml files, then you need to edit the coherence-operator.yaml manifest to add a command line argument to the Operator. Update the controller-manager deployment and add an argument, edit the section that looks like this: args: - operator - --enable-leader-election and add the additional --enable-webhook=false argument like this: args: - operator - --enable-leader-election - --enable-webhook=false apiVersion: apps/v1 kind: Deployment metadata: name: controller-manager Installing Using Helm If installing the Operator using Helm, the webhooks value can be set to false in the values file or on the command line. helm install \\ --namespace <namespace> \\ --set webhooks=false \\ coherence-operator \\ coherence/coherence-operator ", + "text": " A web-hook requires certificates to be able to work in Kubernetes. By default, the operator will create and manage self-signed certificates for this purpose. These certificates are created using the Kubernetes certificate It is possible to use other certificates, either managed by the Kubernetes cert-manager or managed manually. The certificates should be stored in a Secret named coherence-webhook-server-cert in the same namespace that the operator has installed in. (although this name can be changed if required). This Secret must exist, or the operator wil fail to start. The Operator Helm chart will create this Secret when the Operator is managing its own self-signed certs, otherwise the Secret must be created manually or by an external certificate manager. 
Self-Signed Certificates This is the default option, the operator will create and manage a set of self-signed certificates. The Operator will update the Secret with its certificates and create the MutatingWebhookConfiguration and ValidatingWebhookConfiguration resources configured to use those certificates. Cert Manager (Self-Signed) Assuming Kubernetes Cert Manager has been installed in the Kubernetes cluster then to use it for managing the web-hook certificates, the Operator needs to be installed with the CERT_TYPE environment variable set to cert-manager . The Operator will then detect the version of Cert Manager and automatically create the required self-signed Issuer and Certificate resources. Cert Manager will detect these and create the Secret . This may cause the operator Pod to re-start until the Secret has been created. Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with cert-manager . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/cert-manager/g coherence-operator.yaml Install Using Helm To set the certificate manager to use when installing the Helm chart, set the webhookCertType value: helm install \\ --namespace <namespace> \\ --set webhookCertType=cert-manager coherence-operator \\ coherence/coherence-operator The certificate manager will be set to cert-manager Manual Certificates If certificates will be managed some other way (for example by Cert Manager managing real certificates) then the CERT_TYPE environment variable should be set to manual . A Secret must exist in the namespace the operator will be installed into containing the CA certificate, certificate and key files that the operator will use to configure the web-hook. The files must exist with the names expected by the operator. The default name of the Secret expected by the operator is coherence-webhook-server-cert but this can be changed. The certificates in the Secret must be valid for the Service name that exposes the Coherence web-hook. The default format of the DNS used for the certificate CN (common name) is coherence-operator-webhook.<namespace>.svc where <namespace> is the namespace the operator is installed into. Additional names may also be configured using the different formats of Kubernetes Service DNS names. For example, if the Operator is installed into a namespace named coherence the Service DNS names would be: - coherence-operator-webhook.coherence - coherence-operator-webhook.coherence.svc - coherence-operator-webhook.coherence.svc.cluster.local An example of the format of the Secret is shown below: apiVersion: v1 kind: Secret metadata: name: coherence-webhook-server-cert type: Opaque data: ca.crt: ... # <base64 encoded CA certificate file> tls.crt: ... # <base64 encoded certificate file> tls.key: ... # <base64 encoded private key file> Warning If a Secret with the name specified in webhookCertSecret does not exist in the namespace the operator is being installed into then the operator Pod will not start as the Secret will be mounted as a volume in the operator Pod. 
Install Using Manifest File If installing the operator using the manifest yaml file first replace the occurrences of self-signed in the yaml file with manual . For example: curl -L https://github.com/oracle/coherence-operator/releases/download/v3.3.5/coherence-operator.yaml \\ -o coherence-operator.yaml sed -i s/self-signed/manual/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml Note On MacOS the sed command is slightly different for in-place replacement and requires an empty string after the -i parameter: sed -i '' s/self-signed/manual/g coherence-operator.yaml Install Using Helm To configure the operator to use manually managed certificates when installing the Helm chart, set the webhookCertType value. helm install \\ --namespace <namespace> \\ --set webhookCertType=manual \\ coherence-operator \\ coherence/coherence-operator The certificate manager will be set to manual and the operator will expect to find a Secret named coherence-webhook-server-cert To use manually managed certificates and store the keys and certs in a different secret, set the secret name using the webhookCertSecret value. helm install \\ --namespace <namespace> \\ --set webhookCertType=manual \\ --set webhookCertSecret=operator-certs \\ coherence-operator \\ coherence/coherence-operator The certificate manager will be set to manual The name of the secret is set to operator-certs The Coherence Operator will now expect to find the keys and certs in a Secret named operator-certs in the same namespace that the Operator is deployed into. Install the Operator Without Web-Hooks It is possible to start the Operator without it registering any web-hooks with the API server. Caution Running the Operator without web-hooks is not recommended. The admission web-hooks validate the Coherence resource yaml before it gets into the k8s cluster. Without the web-hooks, invalid yaml will be accepted by k8s and the Operator will then log errors when it tries to reconcile invalid yaml. Or worse, the Operator will create an invalid StatefulSet which will then fail to start. Install Using Manifest File If installing using the manifest yaml files, then you need to edit the coherence-operator.yaml manifest to add a command line argument to the Operator. Update the controller-manager deployment and add an argument, edit the section that looks like this: args: - operator - --enable-leader-election and add the additional --enable-webhook=false argument like this: args: - operator - --enable-leader-election - --enable-webhook=false apiVersion: apps/v1 kind: Deployment metadata: name: controller-manager Installing Using Helm If installing the Operator using Helm, the webhooks value can be set to false in the values file or on the command line. helm install \\ --namespace <namespace> \\ --set webhooks=false \\ coherence-operator \\ coherence/coherence-operator ", "title": "Manage Web-Hook Certificates" }, { @@ -2697,7 +2697,7 @@ }, { "location": "/examples/300_helm/README", - "text": " The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. 
The following commands will pull the Operator utils image and run it to display the help fot eh status command: docker pull ghcr.io/oracle/coherence-operator:3.4.0 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.0 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.0 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} content_copy Copied The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-update hook, so that it runs on both install and update of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Jon Pod logs. 
", + "text": " The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. The following commands will pull the Operator utils image and run it to display the help fot eh status command: docker pull ghcr.io/oracle/coherence-operator:3.4.1 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.1 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.1 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. 
chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} content_copy Copied The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-update hook, so that it runs on both install and update of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Jon Pod logs. ", "title": "The Coherence Operator Utils Runner" }, { @@ -2717,12 +2717,12 @@ }, { "location": "/examples/300_helm/README", - "text": " The Helm install command (and update command) have a --wait argument that tells Helm to wait until the installed resources are ready. This can be very useful if you want to ensure that everything is created and running correctly after and install or upgrade. If you read the help test for the --wait argument you will see the following: The limitation should be obvious, Helm can only wait for a sub-set of al the possible resources that you can create from a Helm chart. It has no idea how to wait for a Coherence resource to be ready. To work around this limitation we can use a Helm chart hook , mre specifically a post-install and post-upgrade hook. A hook is typically a k8s Job that Helm will execute, you create the Job spec as part of the Helm chart templates. The Coherence Operator Utils Runner The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. The following commands will pull the Operator utils image and run it to display the help fot eh status command: docker pull ghcr.io/oracle/coherence-operator:3.4.0 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.0 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. 
To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.0 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} content_copy Copied The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-update hook, so that it runs on both install and update of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Jon Pod logs. Installing with the Hook If we repeat the Helm install command to install a Coherence resource with the hook in the chart we should see Helm wait and not complete until the Coherence resource (and by inference the StatefulSet and Pods) are all ready. helm install test ./chart If we were installing a large Coherence cluster, or doing a Helm upgrade, which results in a rolling upgrade of the Coherence cluster, this could take a lot longer than the default timeout we used of 5 minutes. We can alter the timeout and re-try interval using --set arguments. helm install test ./chart --set operator.timeout=20m --set operator.interval=1m In the above command the timeout is now 20 minutes and the status check will re-try every one minute. Skipping Hooks Sometime we might want to install the chart and not wait for everything to be ready. 
We can use the Helm --no-hooks argument to skip hook execution. helm install test ./chart --no-hooks Now the Helm install command will return as soon as the Coherence resource has been created. Other Helm Hooks We saw above how a custom post-install and post-update hook could be used to work aroud the restrictions of Helm’s --wait argument. Of course there are other hooks available in Helm that the method above could be used in. For example, say I had a front end application to be deployed using a Helm chart, but I did not want Helm to start the deployment until the Coherence back-end was ready, I could use the same method above in a pre-install hook. ", + "text": " The Helm install command (and update command) has a --wait argument that tells Helm to wait until the installed resources are ready. This can be very useful if you want to ensure that everything is created and running correctly after an install or upgrade. If you read the help text for the --wait argument you will see the following: The limitation should be obvious: Helm can only wait for a sub-set of all the possible resources that you can create from a Helm chart. It has no idea how to wait for a Coherence resource to be ready. To work around this limitation we can use a Helm chart hook , more specifically a post-install and post-upgrade hook. A hook is typically a k8s Job that Helm will execute; you create the Job spec as part of the Helm chart templates. The Coherence Operator Utils Runner The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. The following commands will pull the Operator utils image and run it to display the help for the status command: docker pull ghcr.io/oracle/coherence-operator:3.4.1 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.1 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in an RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" ) --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. 
spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.1 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-upgrade hook, so that it runs on both install and upgrade of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Job Pod logs. Installing with the Hook If we repeat the Helm install command to install a Coherence resource with the hook in the chart we should see Helm wait and not complete until the Coherence resource (and by inference the StatefulSet and Pods) are all ready. helm install test ./chart If we were installing a large Coherence cluster, or doing a Helm upgrade, which results in a rolling upgrade of the Coherence cluster, this could take a lot longer than the default timeout we used of 5 minutes. We can alter the timeout and re-try interval using --set arguments. helm install test ./chart --set operator.timeout=20m --set operator.interval=1m In the above command the timeout is now 20 minutes and the status check will re-try every one minute. Skipping Hooks Sometimes we might want to install the chart and not wait for everything to be ready. We can use the Helm --no-hooks argument to skip hook execution. helm install test ./chart --no-hooks Now the Helm install command will return as soon as the Coherence resource has been created. Other Helm Hooks We saw above how a custom post-install and post-upgrade hook could be used to work around the restrictions of Helm’s --wait argument. Of course there are other hooks available in Helm in which the method above could be used. For example, say I had a front end application to be deployed using a Helm chart, but I did not want Helm to start the deployment until the Coherence back-end was ready; I could use the same method above in a pre-install hook. 
", "title": "Helm Wait - Waiting for the Install to Complete" }, { "location": "/examples/300_helm/README", - "text": " Occasionally there is a requirement to manage Coherence resources using Helm instead of Kubernetes tools such as kubectl . There is no Helm chart for a Coherence resource as it is a single resource and any Helm chart and values file would need to replicate the entire Coherence CRD if it was to be of generic enough use for everyone. For this reason, anyone wanting to manage Coherence resource using Helm will need to create their own chart, which can then be specific to their needs. This example shows some ways that Helm can be used to manage Coherence resources. Tip The complete source code for this example is in the Coherence Operator GitHub repository. A Simple Generic Helm Chart This example contains the most basic Helm chart possible to support managing a Coherence resource locate in the chart/ directory. The chart is actually completely generic and would support any configuration of Coherence resource. The values file contains a single value spec , which will contain the entire spec of the Coherence resource. spec: There is a single template file, as we only create a single Coherence resource. test-cluster.yaml apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} labels: {{- include \"coherence-labels\" . | indent 4 }} spec: {{- if .Values.spec }} {{ toYaml .Values.spec | indent 2 }} {{- end }} content_copy Copied The first part of the template is fairly standard for a Helm chart, we configure the resource name, namespace and add some labels. The generic nature of the chart comes from the fact that the template then just takes the whole spec value from the values file, and if it is not null converts it to yaml under the spec: section of the template. This means that any yaml that is valid in a Coherence CRD spec section can be used in a values file (or with --set arguments) when installing the chart. Installing the Chart Installing the example Helm chart is as simple as any other chart. One difference here being that the chart is not installed into a chart repository so has to be installed from the char/ directory. If you wanted to you could The following commands are all run from the examples/helm directory so that the chart location is specified as ./chart . You can run the commands from anywhere, but you would need to specify the full path to the example chart directory. A Simple Dry Run To start with we will do a simple dry-run install that will display the yaml Helm would have created if the install command had been real. helm install test ./chart --dry-run The above command should result in the following output NAME: test LAST DEPLOYED: Sat Aug 28 16:30:53 2021 NAMESPACE: default STATUS: pending-install REVISION: 1 TEST SUITE: None HOOKS: MANIFEST: --- # Source: coherence-example/templates/coherence.yaml apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: We can see at the bottom of the output the simple Coherence resource that would have been created by helm. This is a valid Coherence resource because every field in the spec section is optional. If the install had been real this would have resulted in a Coherence cluster named \"test\" with three storage enabled cluster members, as the default replica count is three. 
Setting Values But how do we set other values in the Coherence resouce. That is simple because Helm does not validate what we enter as values we can either create a values file with anything we like under the spec secion or we can specify values using the --set Helm argument. For example, if we wanted to set the replica count to six in a Coherence resource we would need to set the spec.replicas field to 6 , and we do exactly the same in the helm chart. We could create a values file like this: spec: replicas: 6 Which we can install with helm install test ./chart -f test-values.yaml Which would produce a Coherence resource like this: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: replicas: 6 We could have done the same thing using --set , for example: helm install test ./chart -f test-values.yaml --set spec.replicas=6 We can even set more deeply nested values, for example the Coherence log level is set in the spec.coherence.logLevel field of the Coherence CRD so we can use the same value in the Helm install command or values file: helm install test ./chart -f test-values.yaml --set spec.coherence.logLevel=9 Which would produce the following Coherence resource: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: coherence: logLevel: 9 Just like any Helm chart, whether you use --set arguments or use a values file depends on how complex the resulting yaml will be. Some fields of the Coherence CRD spec would be impractical to try to configure on the command line with --set and would be much simpler in the values file. Helm Wait - Waiting for the Install to Complete The Helm install command (and update command) have a --wait argument that tells Helm to wait until the installed resources are ready. This can be very useful if you want to ensure that everything is created and running correctly after and install or upgrade. If you read the help test for the --wait argument you will see the following: The limitation should be obvious, Helm can only wait for a sub-set of al the possible resources that you can create from a Helm chart. It has no idea how to wait for a Coherence resource to be ready. To work around this limitation we can use a Helm chart hook , mre specifically a post-install and post-upgrade hook. A hook is typically a k8s Job that Helm will execute, you create the Job spec as part of the Helm chart templates. The Coherence Operator Utils Runner The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. The following commands will pull the Operator utils image and run it to display the help fot eh status command: docker pull ghcr.io/oracle/coherence-operator:3.4.0 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.0 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. 
Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.0 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} content_copy Copied The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-update hook, so that it runs on both install and update of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Jon Pod logs. Installing with the Hook If we repeat the Helm install command to install a Coherence resource with the hook in the chart we should see Helm wait and not complete until the Coherence resource (and by inference the StatefulSet and Pods) are all ready. helm install test ./chart If we were installing a large Coherence cluster, or doing a Helm upgrade, which results in a rolling upgrade of the Coherence cluster, this could take a lot longer than the default timeout we used of 5 minutes. We can alter the timeout and re-try interval using --set arguments. 
helm install test ./chart --set operator.timeout=20m --set operator.interval=1m In the above command the timeout is now 20 minutes and the status check will re-try every one minute. Skipping Hooks Sometime we might want to install the chart and not wait for everything to be ready. We can use the Helm --no-hooks argument to skip hook execution. helm install test ./chart --no-hooks Now the Helm install command will return as soon as the Coherence resource has been created. Other Helm Hooks We saw above how a custom post-install and post-update hook could be used to work aroud the restrictions of Helm’s --wait argument. Of course there are other hooks available in Helm that the method above could be used in. For example, say I had a front end application to be deployed using a Helm chart, but I did not want Helm to start the deployment until the Coherence back-end was ready, I could use the same method above in a pre-install hook. ", + "text": " Occasionally there is a requirement to manage Coherence resources using Helm instead of Kubernetes tools such as kubectl . There is no Helm chart for a Coherence resource as it is a single resource and any Helm chart and values file would need to replicate the entire Coherence CRD if it was to be of generic enough use for everyone. For this reason, anyone wanting to manage Coherence resource using Helm will need to create their own chart, which can then be specific to their needs. This example shows some ways that Helm can be used to manage Coherence resources. Tip The complete source code for this example is in the Coherence Operator GitHub repository. A Simple Generic Helm Chart This example contains the most basic Helm chart possible to support managing a Coherence resource locate in the chart/ directory. The chart is actually completely generic and would support any configuration of Coherence resource. The values file contains a single value spec , which will contain the entire spec of the Coherence resource. spec: There is a single template file, as we only create a single Coherence resource. test-cluster.yaml apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} labels: {{- include \"coherence-labels\" . | indent 4 }} spec: {{- if .Values.spec }} {{ toYaml .Values.spec | indent 2 }} {{- end }} content_copy Copied The first part of the template is fairly standard for a Helm chart, we configure the resource name, namespace and add some labels. The generic nature of the chart comes from the fact that the template then just takes the whole spec value from the values file, and if it is not null converts it to yaml under the spec: section of the template. This means that any yaml that is valid in a Coherence CRD spec section can be used in a values file (or with --set arguments) when installing the chart. Installing the Chart Installing the example Helm chart is as simple as any other chart. One difference here being that the chart is not installed into a chart repository so has to be installed from the char/ directory. If you wanted to you could The following commands are all run from the examples/helm directory so that the chart location is specified as ./chart . You can run the commands from anywhere, but you would need to specify the full path to the example chart directory. A Simple Dry Run To start with we will do a simple dry-run install that will display the yaml Helm would have created if the install command had been real. 
helm install test ./chart --dry-run The above command should result in the following output NAME: test LAST DEPLOYED: Sat Aug 28 16:30:53 2021 NAMESPACE: default STATUS: pending-install REVISION: 1 TEST SUITE: None HOOKS: MANIFEST: --- # Source: coherence-example/templates/coherence.yaml apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: We can see at the bottom of the output the simple Coherence resource that would have been created by helm. This is a valid Coherence resource because every field in the spec section is optional. If the install had been real this would have resulted in a Coherence cluster named \"test\" with three storage enabled cluster members, as the default replica count is three. Setting Values But how do we set other values in the Coherence resouce. That is simple because Helm does not validate what we enter as values we can either create a values file with anything we like under the spec secion or we can specify values using the --set Helm argument. For example, if we wanted to set the replica count to six in a Coherence resource we would need to set the spec.replicas field to 6 , and we do exactly the same in the helm chart. We could create a values file like this: spec: replicas: 6 Which we can install with helm install test ./chart -f test-values.yaml Which would produce a Coherence resource like this: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: replicas: 6 We could have done the same thing using --set , for example: helm install test ./chart -f test-values.yaml --set spec.replicas=6 We can even set more deeply nested values, for example the Coherence log level is set in the spec.coherence.logLevel field of the Coherence CRD so we can use the same value in the Helm install command or values file: helm install test ./chart -f test-values.yaml --set spec.coherence.logLevel=9 Which would produce the following Coherence resource: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: test namespace: default labels: app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: test app.kubernetes.io/version: \"1.0.0\" spec: coherence: logLevel: 9 Just like any Helm chart, whether you use --set arguments or use a values file depends on how complex the resulting yaml will be. Some fields of the Coherence CRD spec would be impractical to try to configure on the command line with --set and would be much simpler in the values file. Helm Wait - Waiting for the Install to Complete The Helm install command (and update command) have a --wait argument that tells Helm to wait until the installed resources are ready. This can be very useful if you want to ensure that everything is created and running correctly after and install or upgrade. If you read the help test for the --wait argument you will see the following: The limitation should be obvious, Helm can only wait for a sub-set of al the possible resources that you can create from a Helm chart. It has no idea how to wait for a Coherence resource to be ready. To work around this limitation we can use a Helm chart hook , mre specifically a post-install and post-upgrade hook. A hook is typically a k8s Job that Helm will execute, you create the Job spec as part of the Helm chart templates. 
The Coherence Operator Utils Runner The Coherence Operator has two images, the Operator itself and a second image containing an executable named runner which the Operator uses to run Coherence servers in the Pods it is managing. One of the other commands that the runner can execute is a status command, which queries the Operator for the current status of a Coherence resource. If you pull the image and execute it you can see the help text for the runner CLI. The following commands will pull the Operator utils image and run it to display the help fot eh status command: docker pull ghcr.io/oracle/coherence-operator:3.4.1 docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.1 status -h By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job or similar to query k8s for the state of the Coherence resource, but this becomes more complex in RBAC enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute. To run a simple status check we are only interested in the following parameters for the status command: Argument Description --operator-url The Coherence Operator URL, typically the operator’s REST service (default \"http://coherence-operator-rest.coherence.svc.local:8000\" --namespace The namespace the Coherence resource is deployed into. This will be the namespace our Helm chart was installed into. --name The name of the Coherence resource. This will be the name from the Helm chart install --timeout The maximum amount of time to wait for the Coherence resource to reach the required condition (default 5m0s) --interval The status check re-try interval (default 10s) First we can add a few additional default values to our Helm chart values file that will be sensible defaults to pass to the hook Job. spec: operator: namespace: coherence service: coherence-operator-rest port: 8000 image: ghcr.io/oracle/coherence-operator-utils:3.4.1 condition: Ready timeout: 5m interval: 10s We have added an operator section to isolate the values for the hook from the spec values used in our Coherence resource. We can now create the hook template in our Helm chart using the new values in the values file. 
chart/templates/hook.yaml apiVersion: batch/v1 kind: Job metadata: name: \"{{ .Release.Name }}-helm-hook\" namespace: {{ .Release.Namespace }} annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded spec: template: metadata: name: \"{{ .Release.Name }}-helm-hook\" spec: restartPolicy: Never containers: - name: post-install-job image: {{ .Values.operator.image }} command: - \"/files/runner\" - \"status\" - \"--namespace\" - {{ .Release.Namespace | quote }} - \"--name\" - {{ .Release.Name | quote }} - \"--operator-url\" - \"http://{{ .Values.operator.service | default \"coherence-operator-rest\" }}.{{ .Values.operator.namespace | default \"coherence\" }}.svc:{{ .Values.operator.port | default 8000 }}\" - \"--condition\" - {{ .Values.operator.condition | default \"Ready\" | quote }} - \"--timeout\" - {{ .Values.operator.timeout | default \"5m\" | quote }} - \"--interval\" - {{ .Values.operator.interval | default \"10s\" | quote }} content_copy Copied The annotations section is what tells Helm that this is a hook resource: annotations: \"helm.sh/hook\": post-install,post-upgrade \"helm.sh/hook-delete-policy\": hook-succeeded We define the hook as a post-install and post-update hook, so that it runs on both install and update of the Coherence resource. The hook job will also be deleted once it has successfully run. It will not be deleted if it fails, so we can look at the output of the failure in the Jon Pod logs. Installing with the Hook If we repeat the Helm install command to install a Coherence resource with the hook in the chart we should see Helm wait and not complete until the Coherence resource (and by inference the StatefulSet and Pods) are all ready. helm install test ./chart If we were installing a large Coherence cluster, or doing a Helm upgrade, which results in a rolling upgrade of the Coherence cluster, this could take a lot longer than the default timeout we used of 5 minutes. We can alter the timeout and re-try interval using --set arguments. helm install test ./chart --set operator.timeout=20m --set operator.interval=1m In the above command the timeout is now 20 minutes and the status check will re-try every one minute. Skipping Hooks Sometime we might want to install the chart and not wait for everything to be ready. We can use the Helm --no-hooks argument to skip hook execution. helm install test ./chart --no-hooks Now the Helm install command will return as soon as the Coherence resource has been created. Other Helm Hooks We saw above how a custom post-install and post-update hook could be used to work aroud the restrictions of Helm’s --wait argument. Of course there are other hooks available in Helm that the method above could be used in. For example, say I had a front end application to be deployed using a Helm chart, but I did not want Helm to start the deployment until the Coherence back-end was ready, I could use the same method above in a pre-install hook. ", "title": "Manage Coherence Resources using Helm" }, { @@ -2792,7 +2792,7 @@ }, { "location": "/examples/400_Istio/README", - "text": " You can run the Coherence cluster and manage them using the Coherence Operator alongside Istio . Coherence clusters managed with the Coherence Operator 3.4.0 and later work with Istio 1.9.1 and later out of the box. Coherence caches can be accessed from outside the Coherence cluster via Coherence*Extend, REST, and other supported Coherence clients. 
Using Coherence clusters with Istio does not require the Coherence Operator to also be using Istio (and vice-versa) . The Coherence Operator can manage Coherence clusters independent of whether those clusters are using Istio or not. Although Coherence itself can be configured to use TLS, when using Istio Coherence cluster members and clients can just use the default socket configurations and Istio will control and route all the traffic over mTLS. Tip Coherence clusters can be manually configured to work with Istio, even if not using the Operator. See the Istio example in the No Operator Examples How Does Coherence Work with Istio? Istio is a \"Service Mesh\" so the clue to how Istio works in Kubernetes is in the name, it relies on the configuration of Kubernetes Services. This means that any ports than need to be accessed in Pods, including those using in \"Pod to Pod\" communication must be exposed via a Service. Usually a Pod can reach any port on another Pod even if it is not exposed in the container spec, but this is not the case when using Istio as only ports exposed by the Envoy proxy are allowed. For Coherence cluster membership, this means the cluster port and the local port must be exposed on a Service. To do this the local port must be configured to be a fixed port instead of the default ephemeral port. The Coherence Operator uses the default cluster port of 7574 and there is no reason to ever change this. The Coherence Operator always configures a fixed port for the local port so this works with Istio out of the box. In addition, the Operator uses the health check port to determine the status of a cluster, so this needs to be exposed so that the Operator can reach Coherence Pods. The Coherence localhost property can be set to the name of the Pod. This is easily done using the container environment variables, which the Operator does automatically. Coherence clusters are run as a StatefulSet in Kubernetes. This means that the Pods are configured with a host name and a subdomain based on the name of the StatefulSet headless service name, and it is this name that should be used to access Pods. For example for a Coherence resource named storage the Operator will create a StatefulSet named storgage with a headless service named storage-sts . Each Pod in a StatefulSet is numbered with a fixed identity, so the first Pod in this cluster will be storage-0 . The Pod has a number of DNS names that it is reachable with, but the fully qualified name using the headless service will be storage-0.storage-sts or storage-0.storage-sts.<namespace>.svc`. By default, the Operator will expose all the ports configured for the Coherence resource on the StatefulSet headless service. This allows Coherence Extend and gRPC clients to use this service name as the WKA address when using the Coherence NameService to lookup endpoints (see the client example below). Prerequisites The instructions assume that you are using a Kubernetes cluster with Istio installed and configured already. Enable Istio Strict Mode For this example we make Istio run in \"strict\" mode so that it will not allow any traffic between Pods outside the Envoy proxy. If other modes are used, such as permissive, then Istio allows Pod to Pod communication so a cluster may appear to work in permissive mode, when it would not in strict mode. To set Istio to strict mode create the following yaml file. 
apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: \"default\" spec: mtls: mode: STRICT Install this yaml into the Istio system namespace with the following command: kubectl -n istio-system apply istio-strict.yaml Using the Coherence operator with Istio To use Coherence operator with Istio, you can deploy the operator into a namespace which has Istio automatic sidecar injection enabled. Before installing the operator, create the namespace in which you want to run the Coherence operator and label it for automatic injection. kubectl create namespace coherence kubectl label namespace coherence istio-injection=enabled Istio Sidecar AutoInjection is done automatically when you label the coherence namespace with istio-injection. Exclude the Operator Web-Hook from the Envoy Proxy The Coherence Operator uses an admissions web-hook, which Kubernetes will call to validate Coherence resources. This web-hook binds to port 9443 in the Operator Pods and is already configured to use TLS as is standard for Kubernetes admissions web-hooks. If this port is routed through the Envoy proxy Kubernetes will be unable to access the web-hook. The Operator yaml manifests and Helm chart already add the traffic.sidecar.istio.io/excludeInboundPorts annotation to the Operator Pods. This should exclude the web-hook port from being Istio. Another way to do this is to add a PeerAuthentication resource to the Operator namespace. Before installing the Operator , create the following PeerAuthentication yaml. apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: \"coherence-operator\" spec: selector: matchLabels: app.kubernetes.io/name: coherence-operator app.kubernetes.io/instance: coherence-operator-manager app.kubernetes.io/component: manager mtls: mode: STRICT portLevelMtls: 9443: mode: PERMISSIVE Then install this PeerAuthentication resource into the same namespace that the Operator will be installed into. For example, if the Operator will be in the coherence namespace: kubectl -n coherence apply istio-operator.yaml You can then install the operator using your preferred method in the Operator Installation Guide . After installed operator, use the following command to confirm the operator is running: kubectl get pods -n coherence NAME READY STATUS RESTARTS AGE coherence-operator-controller-manager-7d76f9f475-q2vwv 2/2 Running 1 17h The output should show 2/2 in READY column, meaning there are 2 containers running in the Operator pod. One is Coherence Operator and the other is Envoy Proxy. If we use the Istio Kiali addon to visualize Istio we can see the Operator in the list of applications We can also see on the detailed view, that the Operator talks to the Kubernetes API server Creating a Coherence cluster with Istio You can configure a cluster to run with Istio automatic sidecar injection enabled. Before creating the cluster, create the namespace in which the cluster will run and label it for automatic injection. kubectl create namespace coherence-example kubectl label namespace coherence-example istio-injection=enabled Now create a Coherence resource as normal, there is no additional configuration required to work in Istio. 
For example using the yaml below to create a three member cluster with management and metrics enabled: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: replicas: 3 image: ghcr.io/oracle/coherence-ce:22.06.7 labels: app: storage version: 1.0.0 coherence: management: enabled: true metrics: enabled: true ports: - name: management - name: metrics - name: extend port: 20000 appProtocol: tcp - name: grpc-proxy port: 1408 appProtocol: grpc Istio prefers applications to have an app label Istio prefers applications to have a version label The Coherence Pods will expose ports for Management over REST, metrics, a Coherence*Extend proxy and a gRPC proxy The Operator will set the appProtocol for the management and metrics ports to http , but the Extend port must be set manually to tcp so that Istio knows what sort of traffic is being used by that port The gRPC port’s appProtocol field is set to grpc Using the Kiali console, we can now see two applications, the Coherence Operator in the \"coherence\" namespace and the \"storage\" application in the \"coherence-example\" namespace. If we look at the graph view we can see all the traffic between the different parts of the system We can see the Kubernetes API server accessing the Operator web-hook to validate the yaml We can see tge storage pods (the box marked \"storage 1.0.0\") communicate with each other via the storage-sts service to from a Coherence cluster We can see the storage pods communicate with the Operator REST service to request their Coherence site and rack labels We can see the Operator ping the storage pods health endpoints via the storage-sts service All of this traffic is using mTLS controlled by Istio Coherence Clients Running in Kubernetes Coherence Extend clients and gRPC clients running inside the cluster will also work with Istio. For this example the clients will be run in the coherence-client namespace, so it needs to be created and labelled so that Istio injection works in that namespace. kubectl create namespace coherence-client kubectl label namespace coherence-client istio-injection=enabled To simulate a client application a CoherenceJob resource will be used with different configurations for the different types of client. The simplest way to configure a Coherence extend client in a cache configuration file is a default configuration similar to that shown below. No ports or addresses need to be configured. Coherence will use the JVM’s configured cluster name and well know addresses to locate to look up the Extend endpoints using the Coherence NameService. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> We can configure a CoherenceJob to run an Extend client with this configuration as shown below: apiVersion: coherence.oracle.com/v1 kind: CoherenceJob metadata: name: client spec: image: ghcr.io/oracle/coherence-ce:22.06.7 restartPolicy: Never cluster: storage coherence: wka: addresses: - \"storage-sts.coherence-example.svc\" application: type: operator args: - sleep - \"300s\" env: - name: COHERENCE_CLIENT value: \"remote\" - name: COHERENCE_PROFILE value: \"thin\" The client will use the CE image published on GitHub, which will use the default cache configuration file from Coherence jar. 
The cluster name must be set to the cluster name of the cluster started above, in this case storage The WKA address needs to be set to the DNS name of the headless service for the storage cluster created above. As this Job is running in a different name space this is the fully qualified name <service-name>.<namespace>.svc which is storage-sts.coherence-example.svc Instead of running a normal command this Job will run the Operator’s sleep command and sleep for 300s (300 seconds). The COHERENCE_CLIENT environment variable value of remote sets the Coherence cache configuration to be an Extend client using the NameService The COHERENCE_PROFILE environment variable value of thin sets the Coherence cache configuration not to use a Near Cache. The yaml above can be deployed into Kubernetes: kubectl -n coherence-client apply -f extend-client.yaml $ kubectl -n coherence-client get pod NAME READY STATUS RESTARTS AGE client-qgnw5 2/2 Running 0 80s The Pod is now running but not doing anything, just sleeping. If we look at the Kiali dashboard we can see the client application started and communicated wth the Operator. We can use this sleeping Pod to exec into and run commands. In this case we will create a Coherence QueryPlus client and run some CohQL commands. The command below will exec into the sleeping Pod. kubectl -n coherence-client exec -it client-qgnw5 -- /coherence-operator/utils/runner queryplus A QueryPlus client will be started and eventually display the CohQL> prompt. Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. To exit from the CohQL> prompt type the bye command. Coherence Extend clients can connect to the cluster also using Istio to provide mTLS support. Coherence clusters work with mTLS and Coherence clients can also support TLS through the Istio Gateway with TLS termination to connect to Coherence cluster running inside kubernetes. For example, you can apply the following Istio Gateway and Virtual Service in the namespace of the Coherence cluster. Before applying the gateway, create a secret for the credential from the certificate and key (e.g. server.crt and server.key) to be used by the Gateway: Coherence Clients Running Outside Kubernetes Coherence clients running outside the Kubernetes can be configured to connect to a Coherence cluster inside Kubernetes using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many and they depend on the versions of Istio and Kubernetes being used. When connecting Coherence Extend or gRPC clients from outside Kubernetes, the Coherence NameService cannot be used by clients to look up the endpoints. The clients must be configured with fixed endpoints using the hostnames and ports of the configured ingress or gateway services. 
", + "text": " You can run the Coherence cluster and manage them using the Coherence Operator alongside Istio . Coherence clusters managed with the Coherence Operator 3.4.1 and later work with Istio 1.9.1 and later out of the box. Coherence caches can be accessed from outside the Coherence cluster via Coherence*Extend, REST, and other supported Coherence clients. Using Coherence clusters with Istio does not require the Coherence Operator to also be using Istio (and vice-versa) . The Coherence Operator can manage Coherence clusters independent of whether those clusters are using Istio or not. Although Coherence itself can be configured to use TLS, when using Istio Coherence cluster members and clients can just use the default socket configurations and Istio will control and route all the traffic over mTLS. Tip Coherence clusters can be manually configured to work with Istio, even if not using the Operator. See the Istio example in the No Operator Examples How Does Coherence Work with Istio? Istio is a \"Service Mesh\" so the clue to how Istio works in Kubernetes is in the name, it relies on the configuration of Kubernetes Services. This means that any ports than need to be accessed in Pods, including those using in \"Pod to Pod\" communication must be exposed via a Service. Usually a Pod can reach any port on another Pod even if it is not exposed in the container spec, but this is not the case when using Istio as only ports exposed by the Envoy proxy are allowed. For Coherence cluster membership, this means the cluster port and the local port must be exposed on a Service. To do this the local port must be configured to be a fixed port instead of the default ephemeral port. The Coherence Operator uses the default cluster port of 7574 and there is no reason to ever change this. The Coherence Operator always configures a fixed port for the local port so this works with Istio out of the box. In addition, the Operator uses the health check port to determine the status of a cluster, so this needs to be exposed so that the Operator can reach Coherence Pods. The Coherence localhost property can be set to the name of the Pod. This is easily done using the container environment variables, which the Operator does automatically. Coherence clusters are run as a StatefulSet in Kubernetes. This means that the Pods are configured with a host name and a subdomain based on the name of the StatefulSet headless service name, and it is this name that should be used to access Pods. For example for a Coherence resource named storage the Operator will create a StatefulSet named storgage with a headless service named storage-sts . Each Pod in a StatefulSet is numbered with a fixed identity, so the first Pod in this cluster will be storage-0 . The Pod has a number of DNS names that it is reachable with, but the fully qualified name using the headless service will be storage-0.storage-sts or storage-0.storage-sts.<namespace>.svc`. By default, the Operator will expose all the ports configured for the Coherence resource on the StatefulSet headless service. This allows Coherence Extend and gRPC clients to use this service name as the WKA address when using the Coherence NameService to lookup endpoints (see the client example below). Prerequisites The instructions assume that you are using a Kubernetes cluster with Istio installed and configured already. Enable Istio Strict Mode For this example we make Istio run in \"strict\" mode so that it will not allow any traffic between Pods outside the Envoy proxy. 
If other modes are used, such as permissive, then Istio allows Pod to Pod communication, so a cluster may appear to work in permissive mode when it would not in strict mode. To set Istio to strict mode, create the following yaml file. apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: \"default\" spec: mtls: mode: STRICT Install this yaml into the Istio system namespace with the following command: kubectl -n istio-system apply -f istio-strict.yaml Using the Coherence operator with Istio To use the Coherence operator with Istio, you can deploy the operator into a namespace which has Istio automatic sidecar injection enabled. Before installing the operator, create the namespace in which you want to run the Coherence operator and label it for automatic injection. kubectl create namespace coherence kubectl label namespace coherence istio-injection=enabled Istio Sidecar AutoInjection is done automatically when you label the coherence namespace with istio-injection. Exclude the Operator Web-Hook from the Envoy Proxy The Coherence Operator uses an admissions web-hook, which Kubernetes will call to validate Coherence resources. This web-hook binds to port 9443 in the Operator Pods and is already configured to use TLS, as is standard for Kubernetes admissions web-hooks. If this port is routed through the Envoy proxy, Kubernetes will be unable to access the web-hook. The Operator yaml manifests and Helm chart already add the traffic.sidecar.istio.io/excludeInboundPorts annotation to the Operator Pods. This should exclude the web-hook port from being proxied by Istio. Another way to do this is to add a PeerAuthentication resource to the Operator namespace. Before installing the Operator, create the following PeerAuthentication yaml. apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: \"coherence-operator\" spec: selector: matchLabels: app.kubernetes.io/name: coherence-operator app.kubernetes.io/instance: coherence-operator-manager app.kubernetes.io/component: manager mtls: mode: STRICT portLevelMtls: 9443: mode: PERMISSIVE Then install this PeerAuthentication resource into the same namespace that the Operator will be installed into. For example, if the Operator will be in the coherence namespace: kubectl -n coherence apply -f istio-operator.yaml You can then install the operator using your preferred method in the Operator Installation Guide. After installing the operator, use the following command to confirm it is running: kubectl get pods -n coherence NAME READY STATUS RESTARTS AGE coherence-operator-controller-manager-7d76f9f475-q2vwv 2/2 Running 1 17h The output should show 2/2 in the READY column, meaning there are 2 containers running in the Operator pod. One is the Coherence Operator and the other is the Envoy Proxy. If we use the Istio Kiali addon to visualize Istio, we can see the Operator in the list of applications. We can also see, on the detailed view, that the Operator talks to the Kubernetes API server. Creating a Coherence cluster with Istio You can configure a cluster to run with Istio automatic sidecar injection enabled. Before creating the cluster, create the namespace in which the cluster will run and label it for automatic injection. kubectl create namespace coherence-example kubectl label namespace coherence-example istio-injection=enabled Now create a Coherence resource as normal; there is no additional configuration required to work in Istio. 
For example using the yaml below to create a three member cluster with management and metrics enabled: apiVersion: coherence.oracle.com/v1 kind: Coherence metadata: name: storage spec: replicas: 3 image: ghcr.io/oracle/coherence-ce:22.06.7 labels: app: storage version: 1.0.0 coherence: management: enabled: true metrics: enabled: true ports: - name: management - name: metrics - name: extend port: 20000 appProtocol: tcp - name: grpc-proxy port: 1408 appProtocol: grpc Istio prefers applications to have an app label Istio prefers applications to have a version label The Coherence Pods will expose ports for Management over REST, metrics, a Coherence*Extend proxy and a gRPC proxy The Operator will set the appProtocol for the management and metrics ports to http , but the Extend port must be set manually to tcp so that Istio knows what sort of traffic is being used by that port The gRPC port’s appProtocol field is set to grpc Using the Kiali console, we can now see two applications, the Coherence Operator in the \"coherence\" namespace and the \"storage\" application in the \"coherence-example\" namespace. If we look at the graph view we can see all the traffic between the different parts of the system We can see the Kubernetes API server accessing the Operator web-hook to validate the yaml We can see tge storage pods (the box marked \"storage 1.0.0\") communicate with each other via the storage-sts service to from a Coherence cluster We can see the storage pods communicate with the Operator REST service to request their Coherence site and rack labels We can see the Operator ping the storage pods health endpoints via the storage-sts service All of this traffic is using mTLS controlled by Istio Coherence Clients Running in Kubernetes Coherence Extend clients and gRPC clients running inside the cluster will also work with Istio. For this example the clients will be run in the coherence-client namespace, so it needs to be created and labelled so that Istio injection works in that namespace. kubectl create namespace coherence-client kubectl label namespace coherence-client istio-injection=enabled To simulate a client application a CoherenceJob resource will be used with different configurations for the different types of client. The simplest way to configure a Coherence extend client in a cache configuration file is a default configuration similar to that shown below. No ports or addresses need to be configured. Coherence will use the JVM’s configured cluster name and well know addresses to locate to look up the Extend endpoints using the Coherence NameService. <remote-cache-scheme> <scheme-name>thin-remote</scheme-name> <service-name>RemoteCache</service-name> <proxy-service-name>Proxy</proxy-service-name> </remote-cache-scheme> We can configure a CoherenceJob to run an Extend client with this configuration as shown below: apiVersion: coherence.oracle.com/v1 kind: CoherenceJob metadata: name: client spec: image: ghcr.io/oracle/coherence-ce:22.06.7 restartPolicy: Never cluster: storage coherence: wka: addresses: - \"storage-sts.coherence-example.svc\" application: type: operator args: - sleep - \"300s\" env: - name: COHERENCE_CLIENT value: \"remote\" - name: COHERENCE_PROFILE value: \"thin\" The client will use the CE image published on GitHub, which will use the default cache configuration file from Coherence jar. 
The cluster name must be set to the cluster name of the cluster started above, in this case storage The WKA address needs to be set to the DNS name of the headless service for the storage cluster created above. As this Job is running in a different name space this is the fully qualified name <service-name>.<namespace>.svc which is storage-sts.coherence-example.svc Instead of running a normal command this Job will run the Operator’s sleep command and sleep for 300s (300 seconds). The COHERENCE_CLIENT environment variable value of remote sets the Coherence cache configuration to be an Extend client using the NameService The COHERENCE_PROFILE environment variable value of thin sets the Coherence cache configuration not to use a Near Cache. The yaml above can be deployed into Kubernetes: kubectl -n coherence-client apply -f extend-client.yaml $ kubectl -n coherence-client get pod NAME READY STATUS RESTARTS AGE client-qgnw5 2/2 Running 0 80s The Pod is now running but not doing anything, just sleeping. If we look at the Kiali dashboard we can see the client application started and communicated wth the Operator. We can use this sleeping Pod to exec into and run commands. In this case we will create a Coherence QueryPlus client and run some CohQL commands. The command below will exec into the sleeping Pod. kubectl -n coherence-client exec -it client-qgnw5 -- /coherence-operator/utils/runner queryplus A QueryPlus client will be started and eventually display the CohQL> prompt. Coherence Command Line Tool CohQL> A simple command to try is just creating a cache, so at the prompt type the command create cache test which will create a cache named test . If all is configured correctly this client will connect to the cluster over Extend and create the cache called test and return to the CohQL prompt. Coherence Command Line Tool CohQL> create cache test We can also try selecting data from the cache using the CohQL query select * from test (which will return nothing as the cache is empty). CohQL> select * from test Results CohQL> If we now look at the Kiali dashboard we can see that the client has communicated with the storage cluster. All of this communication was using mTLS but without configuring Coherence to use TLS. To exit from the CohQL> prompt type the bye command. Coherence Extend clients can connect to the cluster also using Istio to provide mTLS support. Coherence clusters work with mTLS and Coherence clients can also support TLS through the Istio Gateway with TLS termination to connect to Coherence cluster running inside kubernetes. For example, you can apply the following Istio Gateway and Virtual Service in the namespace of the Coherence cluster. Before applying the gateway, create a secret for the credential from the certificate and key (e.g. server.crt and server.key) to be used by the Gateway: Coherence Clients Running Outside Kubernetes Coherence clients running outside the Kubernetes can be configured to connect to a Coherence cluster inside Kubernetes using any of the ingress or gateway features of Istio and Kubernetes. All the different ways to do this are beyond the scope of this simple example as there are many and they depend on the versions of Istio and Kubernetes being used. When connecting Coherence Extend or gRPC clients from outside Kubernetes, the Coherence NameService cannot be used by clients to look up the endpoints. The clients must be configured with fixed endpoints using the hostnames and ports of the configured ingress or gateway services. 
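The Istio Gateway and VirtualService referred to above, and the command that creates the credential secret, are not shown in this text. A minimal sketch is given below; the secret name, the "catch-all" hosts, and the routing to the Extend port 20000 of the storage-sts service are assumptions for illustration only and would need to match your own environment (for the default Istio ingress gateway the credential secret normally lives in the istio-system namespace).

# Create the credential secret from the certificate and key (names are assumptions)
# kubectl -n istio-system create secret tls extend-credential --cert=server.crt --key=server.key
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: coherence-extend-gateway
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 443
        name: tls-extend
        protocol: TLS
      tls:
        mode: SIMPLE                       # TLS is terminated at the gateway
        credentialName: extend-credential  # the secret created above
      hosts:
        - "*"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: coherence-extend
spec:
  hosts:
    - "*"
  gateways:
    - coherence-extend-gateway
  tcp:
    - match:
        - port: 443
      route:
        - destination:
            host: storage-sts.coherence-example.svc.cluster.local
            port:
              number: 20000                # the Coherence*Extend port of the storage cluster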
", "title": "Using Coherence with Istio" }, { @@ -2822,7 +2822,7 @@ }, { "location": "/docs/about/03_quickstart", - "text": " If you want the default Coherence Operator installation then the simplest solution is use kubectl to apply the manifests from the Operator release. kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file. Because the coherence-operator.yaml manifest also creates the namespace, the corresponding kubectl delete command will remove the namespace and everything deployed to it ! If you do not want this behaviour you should edit the coherence-operator.yaml to remove the namespace section from the start of the file. Alternatively Install Using Helm Alternatively you can install the Operator using the Helm chart. Add the Coherence Operator Helm repository Add the Coherence Operator Helm repo to your local Helm. helm repo add coherence https://oracle.github.io/coherence-operator/charts helm repo update To avoid confusion, the URL https://oracle.github.io/coherence-operator/charts is a Helm repo, it is not a web site you open in a browser. You may think we shouldn’t have to say this, but you’d be surprised. Install the Coherence Operator Helm chart helm install \\ --namespace <namespace> \\ <release-name> \\ coherence/coherence-operator e.g. if the Kubernetes namespace is coherence-test the command would be: helm install --namespace coherence-test operator coherence/coherence-operator or with Helm v2 helm install --namespace coherence-test --name operator coherence/coherence-operator See the full install guide for more details. ", + "text": " If you want the default Coherence Operator installation then the simplest solution is use kubectl to apply the manifests from the Operator release. kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file. Because the coherence-operator.yaml manifest also creates the namespace, the corresponding kubectl delete command will remove the namespace and everything deployed to it ! If you do not want this behaviour you should edit the coherence-operator.yaml to remove the namespace section from the start of the file. Alternatively Install Using Helm Alternatively you can install the Operator using the Helm chart. Add the Coherence Operator Helm repository Add the Coherence Operator Helm repo to your local Helm. helm repo add coherence https://oracle.github.io/coherence-operator/charts helm repo update To avoid confusion, the URL https://oracle.github.io/coherence-operator/charts is a Helm repo, it is not a web site you open in a browser. You may think we shouldn’t have to say this, but you’d be surprised. Install the Coherence Operator Helm chart helm install \\ --namespace <namespace> \\ <release-name> \\ coherence/coherence-operator e.g. 
if the Kubernetes namespace is coherence-test the command would be: helm install --namespace coherence-test operator coherence/coherence-operator or with Helm v2 helm install --namespace coherence-test --name operator coherence/coherence-operator See the full install guide for more details. ", "title": "1. Install the Coherence Operator" }, { diff --git a/docs/snapshot/pages/docs/about/03_quickstart.js b/docs/snapshot/pages/docs/about/03_quickstart.js index cfbf5dc2..b56d516d 100644 --- a/docs/snapshot/pages/docs/about/03_quickstart.js +++ b/docs/snapshot/pages/docs/about/03_quickstart.js @@ -32,7 +32,7 @@ to install a simple Coherence cluster.
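The quickstart hunk above refers to installing a simple Coherence cluster once the Operator is running. As a minimal sketch (the resource name is illustrative), such a cluster can be declared with nothing more than:

apiVersion: coherence.oracle.com/v1
kind: Coherence
metadata:
  name: test-cluster   # illustrative name
spec:
  replicas: 3          # three storage enabled members, which is also the default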

kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml +>kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml

This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file.
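A sketch of one way to do this follows, assuming curl is available; the exact edits depend on the namespace you want and on the contents of the manifest, so review the file before applying it.

# Download the manifest so it can be edited locally
curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml \
    -o coherence-operator.yaml
# Edit the Namespace resource at the top of the file and the namespace: fields as required, then apply
kubectl apply -f coherence-operator.yaml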

diff --git a/docs/snapshot/pages/docs/installation/01_installation.js b/docs/snapshot/pages/docs/installation/01_installation.js index 272d230e..77b7a0ed 100644 --- a/docs/snapshot/pages/docs/installation/01_installation.js +++ b/docs/snapshot/pages/docs/installation/01_installation.js @@ -8,7 +8,7 @@
Coherence Operator Installation
-

The Coherence Operator is available as an image from the GitHub container registry ghcr.io/oracle/coherence-operator:3.4.0 that can +

The Coherence Operator is available as an image from the GitHub container registry ghcr.io/oracle/coherence-operator:3.4.1 that can easily be installed into a Kubernetes cluster.

@@ -192,7 +192,7 @@ for more details if you have well-known-address issues when Pods attempt to form
  • -

    ghcr.io/oracle/coherence-operator:3.4.0 - The Operator image.

    +

    ghcr.io/oracle/coherence-operator:3.4.1 - The Operator image.

@@ -200,7 +200,7 @@ for more details if you have well-known-address issues when Pods attempt to form
  • -

    ghcr.io/oracle/coherence-ce:22.06.8 - The default Coherence image.

    +

    ghcr.io/oracle/coherence-ce:22.06.9 - The default Coherence image.
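If these images have to be served from a different registry, a sketch of copying them there with docker is shown below; the target registry name registry.example.com is purely an assumption.

docker pull ghcr.io/oracle/coherence-operator:3.4.1
docker tag  ghcr.io/oracle/coherence-operator:3.4.1 registry.example.com/oracle/coherence-operator:3.4.1
docker push registry.example.com/oracle/coherence-operator:3.4.1

docker pull ghcr.io/oracle/coherence-ce:22.06.9
docker tag  ghcr.io/oracle/coherence-ce:22.06.9 registry.example.com/oracle/coherence-ce:22.06.9
docker push registry.example.com/oracle/coherence-ce:22.06.9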

@@ -259,7 +259,7 @@ Operators trying to remove finalizers and delete a Coherence cluster.

kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml +>kubectl apply -f https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml

This will create a namespace called coherence and install the Operator into it along with all the required ClusterRole and RoleBinding resources. The coherence namespace can be changed by downloading and editing the yaml file.

@@ -349,15 +349,15 @@ lang="bash"

Set the Operator Image

-

The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.0. +

The Helm chart uses a default Operator image from ghcr.io/oracle/coherence-operator:3.4.1. If the image needs to be pulled from a different location (for example an internal registry) then there are two ways to override the default. Either set the individual image.registry, image.name and image.tag values, or set the whole image name by setting the image value.

For example, if the Operator image has been deployed into a private registry named foo.com but -with the same image name coherence-operator and tag 3.4.0 as the default image, +with the same image name coherence-operator and tag 3.4.1 as the default image, then just the image.registry needs to be specified.

-

In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.0.

+

In the example below, the image used to run the Operator will be foo.com/coherence-operator:3.4.1.

  • -

    ghcr.io/oracle/coherence-operator-package:3.4.0 - the Coherence Operator package

    +

    ghcr.io/oracle/coherence-operator-package:3.4.1 - the Coherence Operator package

  • -

    ghcr.io/oracle/coherence-operator-repo:3.4.0 - the Coherence Operator repository

    +

    ghcr.io/oracle/coherence-operator-repo:3.4.1 - the Coherence Operator repository

  • diff --git a/docs/snapshot/pages/docs/installation/07_webhooks.js b/docs/snapshot/pages/docs/installation/07_webhooks.js index cdd9f042..0542cdc1 100644 --- a/docs/snapshot/pages/docs/installation/07_webhooks.js +++ b/docs/snapshot/pages/docs/installation/07_webhooks.js @@ -64,7 +64,7 @@ re-start until the Secret has been created.

    curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml \ +>curl -L https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml \ -o coherence-operator.yaml sed -i s/self-signed/cert-manager/g coherence-operator.yaml kubectl apply -f coherence-operator.yaml diff --git a/docs/snapshot/pages/docs/installation/09_RBAC.js b/docs/snapshot/pages/docs/installation/09_RBAC.js index c8dab19e..33415597 100644 --- a/docs/snapshot/pages/docs/installation/09_RBAC.js +++ b/docs/snapshot/pages/docs/installation/09_RBAC.js @@ -46,7 +46,7 @@

    Before installing the Operator, with either method described below, the CRDs MUST be manually installed from the Operator manifest files.

    The manifest files are published with the GitHub release at this link: -3.3.5 Manifests

    +3.3.5 Manifests

    You MUST ensure that the CRD manifests match the version of the Operator being installed.

    diff --git a/docs/snapshot/pages/docs/metrics/020_metrics.js b/docs/snapshot/pages/docs/metrics/020_metrics.js index fb1eccf7..00629b78 100644 --- a/docs/snapshot/pages/docs/metrics/020_metrics.js +++ b/docs/snapshot/pages/docs/metrics/020_metrics.js @@ -24,7 +24,7 @@ documentation for full details on the available metrics.

    Important Note Regarding Prometheus Metrics Prefix

    -

    From version 3.4.0 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. +

From version 3.4.1 of the Coherence Operator, the packaged Grafana dashboards no longer use the vendor: prefix for querying Prometheus metrics. This prefix, and the default of legacy metric names in Coherence, was deprecated a number of releases ago and will be removed in the most recent Coherence releases after this Operator release.

diff --git a/docs/snapshot/pages/docs/other/041_global_labels.js b/docs/snapshot/pages/docs/other/041_global_labels.js
index 62836888..77142097 100644
--- a/docs/snapshot/pages/docs/other/041_global_labels.js
+++ b/docs/snapshot/pages/docs/other/041_global_labels.js
@@ -155,7 +155,7 @@ one global annotation foo=bar.

by manually editing the yaml file before installing.

Download the yaml manifest file from the GitHub repo
-https://github.com/oracle/coherence-operator/releases/download/v3.4.0/coherence-operator.yaml
+https://github.com/oracle/coherence-operator/releases/download/v3.4.1/coherence-operator.yaml

Find the section of the yaml file that defines the Operator container args; the default looks like this
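Once that args section has been edited, a minimal sketch for applying the manifest and checking which args the Operator container actually runs with (assuming the default coherence namespace) is:

kubectl apply -f coherence-operator.yaml
# Print the args of the deployed Operator container(s) to confirm the edit took effect.
kubectl -n coherence get deployments \
    -o jsonpath='{.items[*].spec.template.spec.containers[*].args}'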

diff --git a/docs/snapshot/pages/examples/015_simple_image/README.js b/docs/snapshot/pages/examples/015_simple_image/README.js
index f827ff4f..5cbd3b51 100644
--- a/docs/snapshot/pages/examples/015_simple_image/README.js
+++ b/docs/snapshot/pages/examples/015_simple_image/README.js
@@ -94,7 +94,7 @@ title="pom.xml"
      <plugin>
        <groupId>com.google.cloud.tools</groupId>
        <artifactId>jib-maven-plugin</artifactId>
-       <version>3.4.0</version>
+       <version>3.4.1</version>
      </plugin>
    </plugins>
  </build>
diff --git a/docs/snapshot/pages/examples/095_network_policies/README.js b/docs/snapshot/pages/examples/095_network_policies/README.js
index 4850e7db..094c1d49 100644
--- a/docs/snapshot/pages/examples/095_network_policies/README.js
+++ b/docs/snapshot/pages/examples/095_network_policies/README.js
@@ -1223,7 +1223,7 @@ spec:
  spec:
    containers:
      - name: net-test
-       image: ghcr.io/oracle/coherence-operator:3.4.0
+       image: ghcr.io/oracle/coherence-operator:3.4.1
        env:
          - name: HOST
            value: net-test-coherence-server.coh-test.svc
diff --git a/docs/snapshot/pages/examples/300_helm/README.js b/docs/snapshot/pages/examples/300_helm/README.js
index 891cd9e3..70392efb 100644
--- a/docs/snapshot/pages/examples/300_helm/README.js
+++ b/docs/snapshot/pages/examples/300_helm/README.js
@@ -186,8 +186,8 @@ spec:
->docker pull ghcr.io/oracle/coherence-operator:3.4.0
-docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.0 status -h
+>docker pull ghcr.io/oracle/coherence-operator:3.4.1
+docker run -it --rm ghcr.io/oracle/coherence-operator:3.4.1 status -h

By creating a K8s Job that runs the status command we can query the Operator for the status of the Coherence resource we installed from the Helm chart. Of course, we could have written something similar that used kubectl in the Job to query k8s for the state of the Coherence resource, but this becomes more complex in an RBAC-enabled cluster. Querying the simple REST endpoint of the Coherence Operator does not require RBAC rules for the Job to execute.
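A minimal sketch of such a Job is shown below. It only runs the status command's help output so the available flags can be seen; it is not the Job used by the example chart, whose real flags correspond to the Helm values in the hunk that follows.

kubectl apply -f - <<'EOF'
apiVersion: batch/v1
kind: Job
metadata:
  name: operator-status-check
spec:
  backoffLimit: 0
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: status
          image: ghcr.io/oracle/coherence-operator:3.4.1
          # args are appended to the image entrypoint, mirroring "docker run ... status -h" above
          args: ["status", "-h"]
EOF
kubectl logs job/operator-status-check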

@@ -241,7 +241,7 @@ operator:
  namespace: coherence
  service: coherence-operator-rest
  port: 8000
-  image: ghcr.io/oracle/coherence-operator-utils:3.4.0
+  image: ghcr.io/oracle/coherence-operator-utils:3.4.1
  condition: Ready
  timeout: 5m
  interval: 10s
diff --git a/docs/snapshot/pages/examples/400_Istio/README.js b/docs/snapshot/pages/examples/400_Istio/README.js
index ffcf130e..988348ea 100644
--- a/docs/snapshot/pages/examples/400_Istio/README.js
+++ b/docs/snapshot/pages/examples/400_Istio/README.js
@@ -3,7 +3,7 @@

Using Coherence with Istio

You can run Coherence clusters and manage them using the Coherence Operator alongside Istio.
-Coherence clusters managed with the Coherence Operator 3.4.0 and later work with Istio 1.9.1 and later out of the box.
+Coherence clusters managed with the Coherence Operator 3.4.1 and later work with Istio 1.9.1 and later out of the box.
Coherence caches can be accessed from outside the Coherence cluster via Coherence*Extend, REST, and other supported Coherence clients.
Using Coherence clusters with Istio does not require the Coherence Operator to also be using Istio (and vice-versa).
diff --git a/docs/snapshot/pages/examples/no-operator/04_istio/README.js b/docs/snapshot/pages/examples/no-operator/04_istio/README.js
index cdab6799..8efc5049 100644
--- a/docs/snapshot/pages/examples/no-operator/04_istio/README.js
+++ b/docs/snapshot/pages/examples/no-operator/04_istio/README.js
@@ -502,7 +502,7 @@ Various utilities are copied from the Operator image into the base.

->FROM ghcr.io/oracle/coherence-operator:3.4.0 AS Builder
+>FROM ghcr.io/oracle/coherence-operator:3.4.1 AS Builder
FROM ghcr.io/oracle/coherence-ce:22.06.7
COPY --from=Builder /files /files
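As a sketch, an image based on a Dockerfile like the one above can be built and pushed with docker; the registry and tag below are illustrative.

# Build the image from the Dockerfile in the current directory, then push it to a registry.
docker build -t <registry>/coherence-istio-test:1.0 .
docker push <registry>/coherence-istio-test:1.0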