diff --git a/workloads/logging/.gitignore b/workloads/logging/.gitignore
new file mode 100644
index 00000000..a6d7ecd9
--- /dev/null
+++ b/workloads/logging/.gitignore
@@ -0,0 +1 @@
+temp/
diff --git a/workloads/logging/README.md b/workloads/logging/README.md
index d06f6bae..9603886f 100644
--- a/workloads/logging/README.md
+++ b/workloads/logging/README.md
@@ -3,18 +3,24 @@
 The purpose of this script is to deploy the logging stack which includes Elasticsearch, Fluentd and Kibana on an OpenShift cluster.
 
-## Run
+## Cluster Logging Stack Installation
 ```
 $ ./deploy_logging_stack.sh
 ```
 This deploys the cluster-logging-operator which creates and manages the Elasticsearch cluster, Fluentd DaemonSet - pod on each of the nodes and Kibana in openshift-logging namespace.
 
+## Test Run
+```
+$ ./run_logging_test.sh
+```
+This runs the logging test based on env.sh parameters. If **DEPLOY_LOGGING** is set to `True`, it will also install Cluster Logging Stack using `deploy_logging_stack.sh` script
+
 ## Environment variables
 Ensure to have `KUBECONFIG` set to the proper path to your desired cluster.
 
 ### CHANNEL
-Default: `4.6`
+Default: `stable-5.6`
 Update channel for the Elasticsearch and Cluster logging operators.
 
 ### CUSTOM_ES_URL
@@ -27,15 +33,19 @@ Default: 3
 Number of Elasticsearch nodes.
 
 ### ES_STORAGE_CLASS
-Default: 'gp2'
+Default: 'gp3-csi'
 Storage class to use for the persistent storage. The faster the storage, better the Elasticsearch performance.
 
 ### ES_STORAGE_SIZE
 Default: `100G`
 Each data node in the cluster is bound to a Persistent Volume Claim that requests the size specified using this variable from the cloud storage.
 
+### ES_MEMORY_LIMITS
+Default: `16Gi`
+Memory limits for the Elasticsearch as needed.
+
 ### ES_MEMORY_REQUESTS
-Default: `8Gi`
+Default: `16Gi`
 Memory requests for the Elasticsearch as needed.
 
 ### ES_PROXY_MEMORY_LIMITS
@@ -69,7 +79,11 @@ Logs to forward to the Elasticsearch backend.
 Only application logs are forwarded
 
 ### TIMEOUT
 Default: 180
-Time to wait for resources created to be up before exiting
+Time to wait for resources created before exiting
+
+### DEBUG
+Default: false
+Enable debug logging on snafu execution
 
 ### TEST_CLEANUP
 Default: true
@@ -83,7 +97,7 @@ Benchmark timeout in seconds
 
 ## Suggested configuration
 
-[Log store guide](https://docs.openshift.com/container-platform/4.6/logging/config/) can be used to configure the stack depending on the scale, performance and redundancy we need. The following variables can be exported as the environment variables to tweak the supported parameters:
+[Log store guide](https://docs.openshift.com/container-platform/4.12/logging/cluster-logging-deploying.html#cluster-logging-deploy-cli_cluster-logging-deploying) can be used to configure the stack depending on the scale, performance and redundancy we need. The following variables can be exported as the environment variables to tweak the supported parameters:
 
 ```sh
 export CHANNEL=
diff --git a/workloads/logging/common.sh b/workloads/logging/common.sh
index d35b8455..8ceab3b4 100644
--- a/workloads/logging/common.sh
+++ b/workloads/logging/common.sh
@@ -13,16 +13,6 @@ log() {
 # Check if oc client is installed
 log "Checking if oc client is installed"
 
-# Check cluster's health
-if [[ ${CERBERUS_URL} ]]; then
-  response=$(curl ${CERBERUS_URL})
-  if [ "$response" != "True" ]; then
-    log "Cerberus status is False, Cluster is unhealthy"
-    exit 1
-  fi
-fi
-
-
 deploy_operator() {
   deploy_benchmark_operator
 }
@@ -30,59 +20,60 @@ deploy_operator() {
 deploy_logging_stack() {
   log "Deploying logging stack"
   source env.sh
-  ./deploy_logging_stack.sh
+  if ! ./deploy_logging_stack.sh ; then
+    log "Failed to deploy logging stack, exiting..."
+    exit 1
+  fi
 }
+
 run_workload() {
-  timestamp=`date "+%d-%m-%YT%H:%M:%S"`
+  timestamp="$(date "+%d-%m-%YT%H:%M:%S")"
+  export CLUSTER_ID="$(oc get infrastructure.config.openshift.io cluster -o json 2>/dev/null | jq -r .status.infrastructureName)"
+  mkdir -p temp
   log "Customizing log-generator CR file"
-  envsubst < files/log_generator.yaml > log_generator_$timestamp.yaml
-  if [[ ${DEPLOY_LOGGING} == "true" ]]; then
-    # Get bearer token and ES url if applicable
-    if [[ ${CUSTOM_ES_URL} == "" ]]; then
-      ES_BACKEND_TOKEN=`oc whoami -t`
-      ES_BACKEND_URL=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}`
-    else
-      ES_BACKEND_URL=$CUSTOM_ES_URL
-    fi
+  envsubst < files/log_generator.yaml > temp/log_generator_"${timestamp}".yaml
+  # Get bearer token and ES url if applicable
+  if [[ "${CUSTOM_ES_URL}" == "" ]]; then
+    ES_BACKEND_TOKEN="$(oc create token elasticsearch -n openshift-logging --duration 24h)"
+    ES_BACKEND_URL="$(oc get route elasticsearch -n openshift-logging -o "jsonpath={.spec.host}")"
+  else
+    ES_BACKEND_URL="${CUSTOM_ES_URL}"
   fi
   # Add all viable options to the yaml
-  if [[ ${ES_BACKEND_URL} != "" ]]; then
-    echo "    es_url: "$ES_BACKEND_URL >> log_generator_$timestamp.yaml
-  fi
-  if [[ ${ES_BACKEND_TOKEN} != "" ]]; then
-    echo "    es_token: "$ES_BACKEND_TOKEN >> log_generator_$timestamp.yaml
+  if [[ "${ES_BACKEND_URL}" != "" ]]; then
+    echo "    es_url: https://${ES_BACKEND_URL}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${ES_BACKEND_INDEX} != "" ]]; then
-    echo "    es_index: "$ES_BACKEND_INDEX >> log_generator_$timestamp.yaml
+  if [[ "${ES_BACKEND_TOKEN}" != "" ]]; then
+    echo "    es_token: ${ES_BACKEND_TOKEN}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${ES_BACKEND_INDEX} != "" ]]; then
-    echo "    es_index: "$ES_BACKEND_INDEX >> log_generator_$timestamp.yaml
+  if [[ "${ES_BACKEND_INDEX}" != "" ]]; then
+    echo "    es_index: ${ES_BACKEND_INDEX}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${CLOUDWATCH_LOG_GROUP} != "" ]]; then
-    echo "    cloudwatch_log_group: "$CLOUDWATCH_LOG_GROUP >> log_generator_$timestamp.yaml
+  if [[ "${CLOUDWATCH_LOG_GROUP}" != "" ]]; then
+    echo "    cloudwatch_log_group: ${CLOUDWATCH_LOG_GROUP}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${AWS_REGION} != "" ]]; then
-    echo "    aws_region: "$AWS_REGION >> log_generator_$timestamp.yaml
+  if [[ "${AWS_REGION}" != "" ]]; then
+    echo "    aws_region: ${AWS_REGION}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${AWS_ACCESS_KEY} != "" ]]; then
-    echo "    aws_access_key: "$AWS_ACCESS_KEY >> log_generator_$timestamp.yaml
+  if [[ "${AWS_ACCESS_KEY}" != "" ]]; then
+    echo "    aws_access_key: ${AWS_ACCESS_KEY}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${AWS_SECRET_KEY} != "" ]]; then
-    echo "    aws_secret_key: "$AWS_SECRET_KEY >> log_generator_$timestamp.yaml
+  if [[ "${AWS_SECRET_KEY}" != "" ]]; then
+    echo "    aws_secret_key: ${AWS_SECRET_KEY}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  if [[ ${NODE_SELECTOR_KEY} != "" ]]; then
-    echo "    label:" >> log_generator_$timestamp.yaml
-    echo "      key: " >> log_generator_$timestamp.yaml
-    echo "      value: " >> log_generator_$timestamp.yaml
+  if [[ "${NODE_SELECTOR_KEY}" != "" ]] && [[ "${NODE_SELECTOR_VALUE}" != "" ]]; then
+    echo "    label:" >> temp/log_generator_"${timestamp}".yaml
+    echo "      key: ${NODE_SELECTOR_KEY}" >> temp/log_generator_"${timestamp}".yaml
+    echo "      value: ${NODE_SELECTOR_VALUE}" >> temp/log_generator_"${timestamp}".yaml
   fi
-  run_benchmark log_generator_$timestamp.yaml ${TEST_TIMEOUT}
+  run_benchmark temp/log_generator_"${timestamp}".yaml "${TEST_TIMEOUT}"
   local rc=$?
-  if [[ ${TEST_CLEANUP} == "true" ]]; then
+  if [[ "${TEST_CLEANUP}" == "true" ]]; then
     log "Cleaning up benchmark"
-    kubectl delete -f ${TMPCR}
+    kubectl delete -f "${TMPCR}"
   fi
-  return ${rc}
+  return "${rc}"
 }
diff --git a/workloads/logging/deploy_logging_stack.sh b/workloads/logging/deploy_logging_stack.sh
index 1957bdd5..f676d11c 100755
--- a/workloads/logging/deploy_logging_stack.sh
+++ b/workloads/logging/deploy_logging_stack.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-set -e
+#set -e
 
 # Source env.sh to read all the vars
 source env.sh
@@ -28,7 +28,7 @@ fi
 
 function install() {
   # create cluster logging and elasticsearch resources
-  if [[ $CUSTOM_ES_URL != "" ]]; then
+  if [[ "${CUSTOM_ES_URL}" != "" ]]; then
     log "Creating cluster logging with custom elasticsearch backend"
     envsubst < ./files/logging-stack_custom_es.yml | oc create -f -
   else
@@ -37,22 +37,15 @@ function install() {
   fi
 }
 
-wait_time=0
 function cleanup() {
-  oc delete --wait=true project openshift-logging --ignore-not-found
-  oc delete --wait=true project openshift-operators-redhat --ignore-not-found
-  while [[ $( oc get projects | grep -w "openshift-logging\|openshift-operators-redhat") ]]; do
-    sleep 5
-    wait_time=$((wait_time+5))
-    if [[ $wait_time -ge $TIMEOUT ]]; then
-      log "openshift-logging/openshift-operators-redhat namespaces still exists after $TIMEOUT, please check. Exiting"
-      exit 1
-    fi
-  done
+  if ! oc delete --wait=true project openshift-logging openshift-operators-redhat --ignore-not-found --timeout=180s; then
+    log "openshift-logging/openshift-operators-redhat namespaces exists after 3 minutes, please check. Exiting..."
+    exit 1
+  fi
 }
 
 # Delete the namespaces if already exists
-log "Deleting openshift-logging/openshift-operators-redhat namespaces if exists"
+log "Deleting openshift-logging/openshift-operators-redhat namespaces if exists (3 minutes wait)"
 cleanup
 
 # Install the necessary objects for setting up elastic and logging operators and create a cluster logging instance
@@ -66,25 +59,15 @@ sleep 60
 oc wait --for=condition=available -n openshift-logging deployment/cluster-logging-operator --timeout=180s
 log "Checking the status"
 for deployment in $( oc get deployments -n openshift-logging | awk 'NR!=1{print $1}'); do oc wait --for=condition=available -n openshift-logging deployment/$deployment --timeout=180s; done
-wait_time=0
-while [[ $( oc get daemonset.apps/fluentd -n openshift-logging -o=jsonpath='{.status.desiredNumberScheduled}' ) != $( oc get daemonset.apps/fluentd -n openshift-logging -o=jsonpath='{.status.numberReady}' ) ]]; do
-  log "Waiting for fluentd daemonset"
-  sleep 5
-  wait_time=$((wait_time+5))
-  if [[ $wait_time -ge $TIMEOUT ]]; then
-    log "Fluentd daemonset is not ready after $TIMEOUT, please check. Exiting"
-    exit 1
-  fi
-done
 log "Logging stack is up"
 
-if [[ $CUSTOM_ES_URL == "" ]]; then
+if [[ -z "${CUSTOM_ES_URL}" ]]; then
   # Expose the elasticsearch service
   log "Exposing the elasticsearch service by creating a route"
   oc extract secret/elasticsearch --to=/tmp/ --keys=admin-ca --confirm -n openshift-logging
   cp files/elasticsearch-route.yml /tmp/elasticsearch-route.yml
   cat /tmp/admin-ca | sed -e "s/^/ /" >> /tmp/elasticsearch-route.yml
   oc create -f /tmp/elasticsearch-route.yml -n openshift-logging
-  routeES=`oc get route elasticsearch -n openshift-logging -o jsonpath={.spec.host}`
+  routeES=$(oc get route elasticsearch -n openshift-logging -o "jsonpath={.spec.host}")
   log "Elasticsearch is exposed at $routeES, bearer token is needed to access it"
 fi
diff --git a/workloads/logging/env.sh b/workloads/logging/env.sh
index 24a73c5a..bbbb90f9 100755
--- a/workloads/logging/env.sh
+++ b/workloads/logging/env.sh
@@ -13,6 +13,7 @@ export DURATION=${DURATION:-1}
 export MESSAGES_PER_SECOND=${MESSAGES_PER_SECOND:-0}
 export POD_COUNT=${POD_COUNT:-1}
 export TIMEOUT=${TIMEOUT:-600}
+export DEBUG=${DEBUG:-false}
 
 # ES backend information
 export ES_BACKEND_URL=${ES_BACKEND_URL:-""}
@@ -25,7 +26,7 @@ export AWS_REGION=${AWS_REGION:-""}
 export AWS_ACCESS_KEY=${AWS_ACCESS_KEY:-""}
 export AWS_SECRET_KEY=${AWS_SECRET_KEY:-""}
 
-# Node Selector
+# Test Pods Node Selector
 export NODE_SELECTOR_KEY=${NODE_SELECTOR_KEY:-""}
 export NODE_SELECTOR_VALUE=${NODE_SELECTOR_VALUE:-""}
 
@@ -36,19 +37,19 @@ export DEPLOY_LOGGING=${DEPLOY_LOGGING:-true}
 export TEST_CLEANUP=${TEST_CLEANUP:-"false"}
 
 # Deploy Variables
-export CHANNEL=${CHANNEL:=4.6}
+export CHANNEL=${CHANNEL:="stable-5.6"}
 export CUSTOM_ES_URL=${CUSTOM_ES_URL:=""}
 export ES_NODE_COUNT=${ES_NODE_COUNT:=3}
-export ES_STORAGE_CLASS=${ES_STORAGE_CLASS:=gp2}
-export ES_STORAGE_SIZE=${ES_STORAGE_SIZE:=100G}
-export ES_MEMORY_REQUESTS=${ES_MEMORY_REQUESTS:=8Gi}
-export ES_PROXY_MEMORY_LIMITS=${ES_PROXY_MEMORY_LIMITS:=256Mi}
-export 
ES_PROXY_MEMORY_REQUESTS=${ES_PROXY_MEMORY_REQUESTS:=256Mi}
-export ES_REDUNDANCY_POLICY=${ES_REDUNDANCY_POLICY:=SingleRedundancy}
-export FLUENTD_MEMORY_LIMITS=${FLUENTD_MEMORY_LIMITS:=1Gi}
-export FLUENTD_CPU_REQUESTS=${FLUENTD_CPU_REQUESTS:=500m}
-export FLUENTD_MEMORY_REQUESTS=${FLUENTD_MEMORY_REQUESTS:=1Gi}
-export FORWARD_LOGS=${FORWARD_LOGS:=[application]}
-export TIMEOUT=${TIMEOUT:=180}
+export ES_STORAGE_CLASS=${ES_STORAGE_CLASS:="gp3-csi"}
+export ES_STORAGE_SIZE=${ES_STORAGE_SIZE:="100G"}
+export ES_MEMORY_LIMITS=${ES_MEMORY_LIMITS:="16Gi"}
+export ES_MEMORY_REQUESTS=${ES_MEMORY_REQUESTS:="16Gi"}
+export ES_PROXY_MEMORY_LIMITS=${ES_PROXY_MEMORY_LIMITS:="256Mi"}
+export ES_PROXY_MEMORY_REQUESTS=${ES_PROXY_MEMORY_REQUESTS:="256Mi"}
+export ES_REDUNDANCY_POLICY=${ES_REDUNDANCY_POLICY:="SingleRedundancy"}
+export FLUENTD_MEMORY_LIMITS=${FLUENTD_MEMORY_LIMITS:="1Gi"}
+export FLUENTD_CPU_REQUESTS=${FLUENTD_CPU_REQUESTS:="500m"}
+export FLUENTD_MEMORY_REQUESTS=${FLUENTD_MEMORY_REQUESTS:="1Gi"}
+export FORWARD_LOGS=${FORWARD_LOGS:="[application]"}
 
 TEST_CLEANUP=${TEST_CLEANUP:-true}
 export TEST_TIMEOUT=${TEST_TIMEOUT:-7200}
diff --git a/workloads/logging/files/log_generator.yaml b/workloads/logging/files/log_generator.yaml
index fdae081b..ecf10c98 100644
--- a/workloads/logging/files/log_generator.yaml
+++ b/workloads/logging/files/log_generator.yaml
@@ -4,6 +4,7 @@ metadata:
   name: log-generator
   namespace: benchmark-operator
 spec:
+  clustername: ${CLUSTER_ID}
   uuid: ${UUID}
   elasticsearch:
     url: ${ES_SERVER}
@@ -13,6 +14,7 @@ spec:
   workload:
     name: log_generator
     args:
+      debug: ${DEBUG}
       pod_count: ${POD_COUNT}
       size: ${MESSAGE_SIZE}
       messages_per_second: ${MESSAGES_PER_SECOND}
diff --git a/workloads/logging/files/logging-stack-vector.yml b/workloads/logging/files/logging-stack-vector.yml
new file mode 100644
index 00000000..3e41113d
--- /dev/null
+++ b/workloads/logging/files/logging-stack-vector.yml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-operators-redhat
+  annotations:
+    openshift.io/node-selector: ""
+  labels:
+    openshift.io/cluster-monitoring: "true"
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-logging
+  annotations:
+    openshift.io/node-selector: ""
+  labels:
+    openshift.io/cluster-monitoring: "true"
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: openshift-operators-redhat
+  namespace: openshift-operators-redhat
+spec: {}
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: "elasticsearch-operator"
+  namespace: "openshift-operators-redhat"
+spec:
+  channel: "${CHANNEL}"
+  installPlanApproval: "Automatic"
+  source: "redhat-operators"
+  sourceNamespace: "openshift-marketplace"
+  name: "elasticsearch-operator"
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  targetNamespaces:
+    - openshift-logging
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  channel: "${CHANNEL}"
+  name: cluster-logging
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+---
+apiVersion: "logging.openshift.io/v1"
+kind: "ClusterLogging"
+metadata:
+  name: "instance"
+  namespace: "openshift-logging"
+  annotations:
+    logging.openshift.io/preview-vector-collector: enabled
+spec:
+  managementState: "Managed"
+  logStore:
+    type: "elasticsearch"
+    retentionPolicy:
+      application:
+        maxAge: 30d
+      infra:
+        maxAge: 30d
+      audit:
+        maxAge: 30d
+    elasticsearch:
+      nodeCount: ${ES_NODE_COUNT}
+      storage:
+        storageClassName: "${ES_STORAGE_CLASS}"
+        size: "${ES_STORAGE_SIZE}"
+      resources:
+        limits:
+          memory: "${ES_MEMORY_LIMITS}"
+        requests:
+          memory: "${ES_MEMORY_REQUESTS}"
+      proxy:
+        resources:
+          limits:
+            memory: "${ES_PROXY_MEMORY_LIMITS}"
+          requests:
+            memory: "${ES_PROXY_MEMORY_REQUESTS}"
+      redundancyPolicy: "${ES_REDUNDANCY_POLICY}"
+  visualization:
+    type: "kibana"
+    kibana:
+      replicas: 1
+  collection:
+    logs:
+      type: "vector"
+      vector: {}
+---
+apiVersion: logging.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  namespace: openshift-logging
+  name: instance
+  labels: {}
+spec:
+  pipelines:
+    - name: forward-logs
+      inputRefs: ${FORWARD_LOGS}
+      outputRefs:
+        - default
diff --git a/workloads/logging/files/logging-stack.yml b/workloads/logging/files/logging-stack.yml
index 5c79f36f..7410fc8f 100644
--- a/workloads/logging/files/logging-stack.yml
+++ b/workloads/logging/files/logging-stack.yml
@@ -1,14 +1,12 @@
 ---
-
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: openshift-operators-redhat 
+  name: openshift-operators-redhat
   annotations:
     openshift.io/node-selector: ""
   labels:
-    openshift.io/cluster-monitoring: "true" 
-
+    openshift.io/cluster-monitoring: "true"
 ---
 apiVersion: v1
 kind: Namespace
@@ -18,56 +16,53 @@ metadata:
     openshift.io/node-selector: ""
   labels:
     openshift.io/cluster-monitoring: "true"
-
 ---
 apiVersion: operators.coreos.com/v1
 kind: OperatorGroup
 metadata:
   name: openshift-operators-redhat
-  namespace: openshift-operators-redhat 
+  namespace: openshift-operators-redhat
 spec: {}
-
----
-apiVersion: operators.coreos.com/v1
-kind: OperatorGroup
-metadata:
-  name: cluster-logging
-  namespace: openshift-logging
-spec:
-  targetNamespaces:
-  - openshift-logging
-
 ---
 apiVersion: operators.coreos.com/v1alpha1
 kind: Subscription
 metadata:
   name: "elasticsearch-operator"
-  namespace: "openshift-operators-redhat" 
+  namespace: "openshift-operators-redhat"
 spec:
-  channel: "$CHANNEL"
+  channel: "${CHANNEL}"
   installPlanApproval: "Automatic"
-  source: "redhat-operators" 
+  source: "redhat-operators"
   sourceNamespace: "openshift-marketplace"
   name: "elasticsearch-operator"
-
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  targetNamespaces:
+    - openshift-logging
 ---
 apiVersion: operators.coreos.com/v1alpha1
 kind: Subscription
 metadata:
   name: cluster-logging
-  namespace: openshift-logging 
+  namespace: openshift-logging
 spec:
-  channel: "$CHANNEL"
+  channel: "${CHANNEL}"
   name: cluster-logging
-  source: redhat-operators 
+  source: redhat-operators
   sourceNamespace: openshift-marketplace
-
 ---
-apiVersion: logging.openshift.io/v1
+apiVersion: "logging.openshift.io/v1"
 kind: "ClusterLogging"
 metadata:
   name: "instance"
   namespace: "openshift-logging"
+  annotations:
+    logging.openshift.io/preview-vector-collector: enabled
 spec:
   managementState: "Managed"
   logStore:
@@ -80,38 +75,36 @@ spec:
       audit:
         maxAge: 30d
     elasticsearch:
-      nodeCount: $ES_NODE_COUNT
+      nodeCount: ${ES_NODE_COUNT}
       storage:
-        storageClassName: $ES_STORAGE_CLASS
-        size: $ES_STORAGE_SIZE
+        storageClassName: "${ES_STORAGE_CLASS}"
+        size: "${ES_STORAGE_SIZE}"
+      resources:
+        limits:
+          memory: "${ES_MEMORY_LIMITS}"
+        requests:
+          memory: "${ES_MEMORY_REQUESTS}"
+      proxy:
         resources:
-        requests:
-          memory: $ES_MEMORY_REQUESTS
-      proxy:
           limits:
-            memory: $ES_PROXY_MEMORY_LIMITS
+            memory: "${ES_PROXY_MEMORY_LIMITS}"
           requests:
-            memory: $ES_PROXY_MEMORY_REQUESTS
-      redundancyPolicy: $ES_REDUNDANCY_POLICY
+            memory: "${ES_PROXY_MEMORY_REQUESTS}"
+      redundancyPolicy: "${ES_REDUNDANCY_POLICY}"
   visualization:
     type: "kibana"
     kibana:
       replicas: 1
-  curation:
-    type: "curator"
-    curator:
-      schedule: "30 3 * * *"
   collection:
     logs:
       type: "fluentd"
       fluentd:
         resources:
           limits:
-            memory: $FLUENTD_MEMORY_LIMITS
+            memory: ${FLUENTD_MEMORY_LIMITS}
           requests:
-            cpu: $FLUENTD_CPU_REQUESTS
-            memory: $FLUENTD_MEMORY_REQUESTS
-
+            cpu: ${FLUENTD_CPU_REQUESTS}
+            memory: ${FLUENTD_MEMORY_REQUESTS}
 ---
 apiVersion: logging.openshift.io/v1
 kind: ClusterLogForwarder
@@ -122,6 +115,6 @@ metadata:
 spec:
   pipelines:
     - name: forward-logs
-      inputRefs: $FORWARD_LOGS
+      inputRefs: ${FORWARD_LOGS}
       outputRefs:
         - default
diff --git a/workloads/logging/files/logging-stack_custom_es.yml b/workloads/logging/files/logging-stack_custom_es.yml
index c107f9af..659bcee8 100644
--- a/workloads/logging/files/logging-stack_custom_es.yml
+++ b/workloads/logging/files/logging-stack_custom_es.yml
@@ -1,14 +1,12 @@
 ---
-
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: openshift-operators-redhat 
+  name: openshift-operators-redhat
   annotations:
     openshift.io/node-selector: ""
   labels:
-    openshift.io/cluster-monitoring: "true" 
-
+    openshift.io/cluster-monitoring: "true"
 ---
 apiVersion: v1
 kind: Namespace
@@ -18,15 +16,25 @@ metadata:
     openshift.io/node-selector: ""
   labels:
     openshift.io/cluster-monitoring: "true"
-
 ---
 apiVersion: operators.coreos.com/v1
 kind: OperatorGroup
 metadata:
   name: openshift-operators-redhat
-  namespace: openshift-operators-redhat 
+  namespace: openshift-operators-redhat
 spec: {}
-
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: "elasticsearch-operator"
+  namespace: "openshift-operators-redhat"
+spec:
+  channel: "${CHANNEL}"
+  installPlanApproval: "Automatic"
+  source: "redhat-operators"
+  sourceNamespace: "openshift-marketplace"
+  name: "elasticsearch-operator"
 ---
 apiVersion: operators.coreos.com/v1
 kind: OperatorGroup
@@ -36,25 +44,25 @@ metadata:
 spec:
   targetNamespaces:
     - openshift-logging
-
 ---
 apiVersion: operators.coreos.com/v1alpha1
 kind: Subscription
 metadata:
   name: cluster-logging
-  namespace: openshift-logging 
+  namespace: openshift-logging
 spec:
-  channel: "$CHANNEL"
+  channel: "${CHANNEL}"
   name: cluster-logging
-  source: redhat-operators 
+  source: redhat-operators
   sourceNamespace: openshift-marketplace
-
 ---
-apiVersion: logging.openshift.io/v1
+apiVersion: "logging.openshift.io/v1"
 kind: "ClusterLogging"
 metadata:
   name: "instance"
   namespace: "openshift-logging"
+  annotations:
+    logging.openshift.io/preview-vector-collector: enabled
 spec:
   managementState: "Managed"
   logStore:
@@ -66,21 +74,31 @@ spec:
         maxAge: 30d
       audit:
         maxAge: 30d
-  curation:
-    type: "curator"
-    curator:
-      schedule: "30 3 * * *"
-  collection:
-    logs:
-      type: "fluentd"
-      fluentd:
+    elasticsearch:
+      nodeCount: ${ES_NODE_COUNT}
+      storage:
+        storageClassName: "${ES_STORAGE_CLASS}"
+        size: "${ES_STORAGE_SIZE}"
+      resources:
+        limits:
+          memory: "${ES_MEMORY_LIMITS}"
+        requests:
+          memory: "${ES_MEMORY_REQUESTS}"
+      proxy:
         resources:
           limits:
-            memory: $FLUENTD_MEMORY_LIMITS
+            memory: "${ES_PROXY_MEMORY_LIMITS}"
           requests:
-            cpu: $FLUENTD_CPU_REQUESTS
-            memory: $FLUENTD_MEMORY_REQUESTS
-
+            memory: "${ES_PROXY_MEMORY_REQUESTS}"
+      redundancyPolicy: "${ES_REDUNDANCY_POLICY}"
+  visualization:
+    type: "kibana"
+    kibana:
+      replicas: 1
+  collection:
+    logs:
+      type: "vector"
+      vector: {}
 ---
 apiVersion: logging.openshift.io/v1
 kind: ClusterLogForwarder
@@ -92,9 +110,9 @@ spec:
   outputs:
     - name: elasticsearch-external
       type: "elasticsearch"
-      url: $CUSTOM_ES_URL
+      url: ${CUSTOM_ES_URL}
   pipelines:
     - name: forward-logs
-      inputRefs: $FORWARD_LOGS
+      inputRefs: ${FORWARD_LOGS}
       outputRefs:
         - elasticsearch-external