diff --git a/docs/examples/solr/autoscaler/combined.yaml b/docs/examples/solr/autoscaler/combined.yaml new file mode 100644 index 0000000000..6265d5f238 --- /dev/null +++ b/docs/examples/solr/autoscaler/combined.yaml @@ -0,0 +1,18 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn \ No newline at end of file diff --git a/docs/examples/solr/autoscaler/compute/combined-scaler.yaml b/docs/examples/solr/autoscaler/compute/combined-scaler.yaml new file mode 100644 index 0000000000..0d997cc688 --- /dev/null +++ b/docs/examples/solr/autoscaler/compute/combined-scaler.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-node-autoscaler + namespace: demo +spec: + databaseRef: + name: solr-combined + opsRequestOptions: + timeout: 5m + apply: IfReady + compute: + node: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 5 + minAllowed: + cpu: 1 + memory: 2Gi + maxAllowed: + cpu: 2 + memory: 3Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" \ No newline at end of file diff --git a/docs/examples/solr/autoscaler/compute/topology-scaler.yaml b/docs/examples/solr/autoscaler/compute/topology-scaler.yaml new file mode 100644 index 0000000000..bc9f2d727b --- /dev/null +++ b/docs/examples/solr/autoscaler/compute/topology-scaler.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-data-autoscaler + namespace: demo +spec: + databaseRef: + name: solr-cluster + opsRequestOptions: + timeout: 5m + apply: IfReady + compute: + data: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 5 + minAllowed: + cpu: 1 + memory: 2.5Gi + maxAllowed: + cpu: 2 + memory: 3Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" \ No newline at end of file diff --git a/docs/examples/solr/autoscaler/storage/combined-scaler.yaml b/docs/examples/solr/autoscaler/storage/combined-scaler.yaml new file mode 100644 index 0000000000..0effd52839 --- /dev/null +++ b/docs/examples/solr/autoscaler/storage/combined-scaler.yaml @@ -0,0 +1,14 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-storage-autoscaler-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + storage: + node: + expansionMode: "Offline" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 100 \ No newline at end of file diff --git a/docs/examples/solr/autoscaler/storage/topoology-scaler.yaml b/docs/examples/solr/autoscaler/storage/topoology-scaler.yaml new file mode 100644 index 0000000000..8e2c7a6568 --- /dev/null +++ b/docs/examples/solr/autoscaler/storage/topoology-scaler.yaml @@ -0,0 +1,14 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-storage-autoscaler-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + storage: + data: + expansionMode: "Offline" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 100 \ No newline at end of file diff --git a/docs/examples/solr/autoscaler/topology.yaml b/docs/examples/solr/autoscaler/topology.yaml new file mode 100644 index 0000000000..b3d6138c89 --- /dev/null +++ b/docs/examples/solr/autoscaler/topology.yaml @@ -0,0 +1,37 @@ +apiVersion: 
kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.6.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/examples/solr/configuration/sl-custom-config.yaml b/docs/examples/solr/configuration/sl-custom-config.yaml new file mode 100644 index 0000000000..b12aff35e7 --- /dev/null +++ b/docs/examples/solr/configuration/sl-custom-config.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + \ No newline at end of file diff --git a/docs/examples/solr/configuration/sl-custom-nodeselector.yaml b/docs/examples/solr/configuration/sl-custom-nodeselector.yaml new file mode 100644 index 0000000000..5ddebfe6ed --- /dev/null +++ b/docs/examples/solr/configuration/sl-custom-nodeselector.yaml @@ -0,0 +1,21 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-custom-nodeselector + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + podTemplate: + spec: + nodeSelector: + topology.gke.io/zone: us-central1-b + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/configuration/sl-custom-podtemplate.yaml b/docs/examples/solr/configuration/sl-custom-podtemplate.yaml new file mode 100644 index 0000000000..7106a1f52c --- /dev/null +++ b/docs/examples/solr/configuration/sl-custom-podtemplate.yaml @@ -0,0 +1,68 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-misc-config + namespace: demo +spec: + version: "9.6.1" + zookeeperRef: + name: zoo + namespace: demo + topology: + data: + replicas: 1 + podTemplate: + spec: + containers: + - name: "solr" + resources: + requests: + cpu: "900m" + limits: + cpu: "900m" + memory: "2.5Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + overseer: + replicas: 1 + podTemplate: + spec: + containers: + - name: "solr" + resources: + requests: + cpu: "900m" + limits: + cpu: "900m" + memory: "2.5Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + coordinator: + replicas: 1 + podTemplate: + spec: + containers: + - name: "solr" + resources: + requests: + cpu: "900m" + limits: + cpu: "900m" + memory: "2.5Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/examples/solr/configuration/solr-combined.yaml b/docs/examples/solr/configuration/solr-combined.yaml new file mode 100644 index 0000000000..f73db241f6 --- /dev/null +++ b/docs/examples/solr/configuration/solr-combined.yaml @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr + namespace: demo +spec: + configSecret: + name: sl-combined-custom-config + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn diff --git a/docs/examples/solr/configuration/solr-with-tolerations.yaml b/docs/examples/solr/configuration/solr-with-tolerations.yaml new file mode 100644 index 0000000000..ce4548b827 --- /dev/null +++ b/docs/examples/solr/configuration/solr-with-tolerations.yaml @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-with-toleration + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node7" + effect: "NoSchedule" + - key: "key1" + operator: "Equal" + value: "node8" + effect: "NoSchedule" + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/configuration/solr-without-toleration.yaml b/docs/examples/solr/configuration/solr-without-toleration.yaml new file mode 100644 index 0000000000..9fc21cb8e7 --- /dev/null +++ b/docs/examples/solr/configuration/solr-without-toleration.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-without-toleration + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/monitoring/solr-builtin.yaml b/docs/examples/solr/monitoring/solr-builtin.yaml new file mode 100644 index 0000000000..19375fe9ba --- /dev/null +++ b/docs/examples/solr/monitoring/solr-builtin.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: builtin-prom-sl + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + enableSSL: true + monitor: + agent: prometheus.io/builtin + solrModules: + - prometheus-exporter + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard diff --git a/docs/examples/solr/monitoring/solr-operator.yaml b/docs/examples/solr/monitoring/solr-operator.yaml new file mode 100644 index 0000000000..640abbf549 --- /dev/null +++ b/docs/examples/solr/monitoring/solr-operator.yaml @@ -0,0 +1,29 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: operator-prom-sl + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s + solrModules: + - s3-repository + - gcs-repository + - prometheus-exporter + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/examples/solr/reconfigure-tls/add-tls.yaml b/docs/examples/solr/reconfigure-tls/add-tls.yaml new file mode 100644 index 0000000000..a5db551de6 --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/add-tls.yaml @@ -0,0 +1,24 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-add-tls + namespace: demo +spec: + apply: IfReady + tls: + issuerRef: + apiGroup: cert-manager.io + name: solr-ca-issuer + kind: Issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + databaseRef: + name: solr-cluster + type: ReconfigureTLS \ No newline at end 
of file diff --git a/docs/examples/solr/reconfigure-tls/remove-tls.yaml b/docs/examples/solr/reconfigure-tls/remove-tls.yaml new file mode 100644 index 0000000000..182fdffac0 --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/remove-tls.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + remove: true \ No newline at end of file diff --git a/docs/examples/solr/reconfigure-tls/rotate-tls.yaml b/docs/examples/solr/reconfigure-tls/rotate-tls.yaml new file mode 100644 index 0000000000..a3ee6c6b58 --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/rotate-tls.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + rotateCertificates: true \ No newline at end of file diff --git a/docs/examples/solr/reconfigure-tls/sl-new-issuer.yaml b/docs/examples/solr/reconfigure-tls/sl-new-issuer.yaml new file mode 100644 index 0000000000..ad7c81b5c3 --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/sl-new-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: sl-new-issuer + namespace: demo +spec: + ca: + secretName: solr-new-ca \ No newline at end of file diff --git a/docs/examples/solr/reconfigure-tls/sl-update-issuer.yaml b/docs/examples/solr/reconfigure-tls/sl-update-issuer.yaml new file mode 100644 index 0000000000..b8f39a765b --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/sl-update-issuer.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + issuerRef: + name: sl-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" \ No newline at end of file diff --git a/docs/examples/solr/reconfigure-tls/update-issuer.yaml b/docs/examples/solr/reconfigure-tls/update-issuer.yaml new file mode 100644 index 0000000000..b8f39a765b --- /dev/null +++ b/docs/examples/solr/reconfigure-tls/update-issuer.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + issuerRef: + name: sl-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/new-sl-custom-config.yaml b/docs/examples/solr/reconfigure/new-sl-custom-config.yaml new file mode 100644 index 0000000000..dbd621f0d3 --- /dev/null +++ b/docs/examples/solr/reconfigure/new-sl-custom-config.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: new-sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2030} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/sl-apply-config.yaml b/docs/examples/solr/reconfigure/sl-apply-config.yaml new file mode 100644 index 0000000000..62f7de5a50 --- /dev/null +++ b/docs/examples/solr/reconfigure/sl-apply-config.yaml @@ -0,0 +1,23 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-reconfigure-apply-config + namespace: demo +spec: + apply: IfReady + configuration: + applyConfig: + solr.xml: | + + ${solr.max.booleanClauses:2024} + + + 
solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + + databaseRef: + name: solr-combined + type: Reconfigure \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/sl-custom-config.yaml b/docs/examples/solr/reconfigure/sl-custom-config.yaml new file mode 100644 index 0000000000..b12aff35e7 --- /dev/null +++ b/docs/examples/solr/reconfigure/sl-custom-config.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/sl-reconfigure-apply-config.yaml b/docs/examples/solr/reconfigure/sl-reconfigure-apply-config.yaml new file mode 100644 index 0000000000..b9b9ef5075 --- /dev/null +++ b/docs/examples/solr/reconfigure/sl-reconfigure-apply-config.yaml @@ -0,0 +1,23 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-reconfigure-apply-config + namespace: demo +spec: + apply: IfReady + configuration: + applyConfig: + solr.xml: | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + + databaseRef: + name: solr + type: Reconfigure \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml b/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml new file mode 100644 index 0000000000..4118884c98 --- /dev/null +++ b/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-reconfigure-custom-config + namespace: demo +spec: + apply: IfReady + configuration: + configSecret: + name: sl-combined-custom-config + databaseRef: + name: solr + type: Reconfigure \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/solr-combined.yaml b/docs/examples/solr/reconfigure/solr-combined.yaml new file mode 100644 index 0000000000..f73db241f6 --- /dev/null +++ b/docs/examples/solr/reconfigure/solr-combined.yaml @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr + namespace: demo +spec: + configSecret: + name: sl-combined-custom-config + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn diff --git a/docs/examples/solr/restart/ops.yaml b/docs/examples/solr/restart/ops.yaml new file mode 100644 index 0000000000..983ce6d031 --- /dev/null +++ b/docs/examples/solr/restart/ops.yaml @@ -0,0 +1,10 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: restart + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: Restart \ No newline at end of file diff --git a/docs/examples/solr/restart/solr-cluster.yaml b/docs/examples/solr/restart/solr-cluster.yaml new file mode 100644 index 0000000000..1d8c04db92 --- /dev/null +++ b/docs/examples/solr/restart/solr-cluster.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.6.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + 
coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/examples/solr/scaling/horizontal/combined/scaling.yaml b/docs/examples/solr/scaling/horizontal/combined/scaling.yaml new file mode 100644 index 0000000000..942ec52f4b --- /dev/null +++ b/docs/examples/solr/scaling/horizontal/combined/scaling.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-up-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + type: HorizontalScaling + horizontalScaling: + node: 4 \ No newline at end of file diff --git a/docs/examples/solr/scaling/horizontal/combined/solr.yaml b/docs/examples/solr/scaling/horizontal/combined/solr.yaml new file mode 100644 index 0000000000..709d7a5805 --- /dev/null +++ b/docs/examples/solr/scaling/horizontal/combined/solr.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/scaling/horizontal/topology/scaling.yaml b/docs/examples/solr/scaling/horizontal/topology/scaling.yaml new file mode 100644 index 0000000000..8ec6ea83a1 --- /dev/null +++ b/docs/examples/solr/scaling/horizontal/topology/scaling.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-up-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: HorizontalScaling + horizontalScaling: + data: 2 + overseer: 2 + coordinator: 2 \ No newline at end of file diff --git a/docs/examples/solr/scaling/horizontal/topology/solr.yaml b/docs/examples/solr/scaling/horizontal/topology/solr.yaml new file mode 100644 index 0000000000..f14191731b --- /dev/null +++ b/docs/examples/solr/scaling/horizontal/topology/solr.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/scaling/vertical/combined/scaling.yaml b/docs/examples/solr/scaling/vertical/combined/scaling.yaml new file mode 100644 index 0000000000..cedd8b2c18 --- /dev/null +++ b/docs/examples/solr/scaling/vertical/combined/scaling.yaml @@ -0,0 +1,18 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-vscale-combined + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi \ No newline at end of file diff --git a/docs/examples/solr/scaling/vertical/combined/solr.yaml b/docs/examples/solr/scaling/vertical/combined/solr.yaml new file mode 100644 index 0000000000..709d7a5805 --- /dev/null +++ b/docs/examples/solr/scaling/vertical/combined/solr.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + 
zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/scaling/vertical/topology/scaling.yaml b/docs/examples/solr/scaling/vertical/topology/scaling.yaml new file mode 100644 index 0000000000..08f7b3b11b --- /dev/null +++ b/docs/examples/solr/scaling/vertical/topology/scaling.yaml @@ -0,0 +1,34 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-vscale-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + data: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + overseer: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + coordinator: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi \ No newline at end of file diff --git a/docs/examples/solr/scaling/vertical/topology/solr.yaml b/docs/examples/solr/scaling/vertical/topology/solr.yaml new file mode 100644 index 0000000000..f53aad20e0 --- /dev/null +++ b/docs/examples/solr/scaling/vertical/topology/solr.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/examples/solr/tls/sl-issuer.yaml b/docs/examples/solr/tls/sl-issuer.yaml new file mode 100644 index 0000000000..d0fa139d07 --- /dev/null +++ b/docs/examples/solr/tls/sl-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: solr-ca-issuer + namespace: demo +spec: + ca: + secretName: solr-ca \ No newline at end of file diff --git a/docs/examples/solr/tls/solr-combined.yaml b/docs/examples/solr/tls/solr-combined.yaml new file mode 100644 index 0000000000..d22a1b832b --- /dev/null +++ b/docs/examples/solr/tls/solr-combined.yaml @@ -0,0 +1,33 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + name: solr-ca-issuer + kind: Issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard diff --git a/docs/examples/solr/tls/solr-topology.yaml b/docs/examples/solr/tls/solr-topology.yaml new file mode 100644 index 0000000000..4ce8d89b23 --- /dev/null +++ b/docs/examples/solr/tls/solr-topology.yaml @@ -0,0 +1,52 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + name: self-signed-issuer + kind: Issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + 
- ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/examples/solr/update-version/solr.yaml b/docs/examples/solr/update-version/solr.yaml new file mode 100644 index 0000000000..f53aad20e0 --- /dev/null +++ b/docs/examples/solr/update-version/solr.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/examples/solr/update-version/update-version-ops.yaml b/docs/examples/solr/update-version/update-version-ops.yaml new file mode 100644 index 0000000000..7dac89eedc --- /dev/null +++ b/docs/examples/solr/update-version/update-version-ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: solr-update-version + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: UpdateVersion + updateVersion: + targetVersion: 9.6.1 \ No newline at end of file diff --git a/docs/examples/solr/volume-expansion/combined.yaml b/docs/examples/solr/volume-expansion/combined.yaml new file mode 100644 index 0000000000..709d7a5805 --- /dev/null +++ b/docs/examples/solr/volume-expansion/combined.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/solr/volume-expansion/solr-volume-expansion-combined.yaml b/docs/examples/solr/volume-expansion/solr-volume-expansion-combined.yaml new file mode 100644 index 0000000000..b25b2c900d --- /dev/null +++ b/docs/examples/solr/volume-expansion/solr-volume-expansion-combined.yaml @@ -0,0 +1,13 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-volume-exp-combined + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + node: 11Gi diff --git a/docs/examples/solr/volume-expansion/solr-volume-expansion-topology.yaml b/docs/examples/solr/volume-expansion/solr-volume-expansion-topology.yaml new file mode 100644 index 0000000000..52420a6280 --- /dev/null +++ b/docs/examples/solr/volume-expansion/solr-volume-expansion-topology.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-volume-exp-topology + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + data: 11Gi + overseer : 11Gi \ No newline at end of file diff --git a/docs/examples/solr/volume-expansion/topology.yaml b/docs/examples/solr/volume-expansion/topology.yaml new file mode 100644 index 0000000000..f14191731b --- /dev/null +++ 
b/docs/examples/solr/volume-expansion/topology.yaml @@ -0,0 +1,34 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/guides/solr/autoscaler/_index.md b/docs/guides/solr/autoscaler/_index.md new file mode 100644 index 0000000000..cc90820d31 --- /dev/null +++ b/docs/guides/solr/autoscaler/_index.md @@ -0,0 +1,10 @@ +--- +title: Autoscaling +menu: + docs_{{ .version }}: + identifier: sl-autoscaling-solr + name: Autoscaling + parent: sl-solr-guides + weight: 28 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/autoscaler/compute/_index.md b/docs/guides/solr/autoscaler/compute/_index.md new file mode 100644 index 0000000000..6dca8cfb4d --- /dev/null +++ b/docs/guides/solr/autoscaler/compute/_index.md @@ -0,0 +1,10 @@ +--- +title: Solr Compute Autoscaler +menu: + docs_{{ .version }}: + identifier: sl-compute-autoscaling-solr + name: Solr Compute Autoscaling + parent: sl-autoscaling-solr + weight: 14 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/autoscaler/compute/combined.md b/docs/guides/solr/autoscaler/compute/combined.md new file mode 100644 index 0000000000..6fdf320720 --- /dev/null +++ b/docs/guides/solr/autoscaler/compute/combined.md @@ -0,0 +1,446 @@ +--- +title: Solr Combined Cluster Autoscaling +menu: + docs_{{ .version }}: + identifier: sl-auto-scaling-combined + name: Combined + parent: sl-compute-autoscaling-solr + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Autoscaling the Compute Resource of an Solr Combined Cluster + +This guide will show you how to use `KubeDB` to autoscale compute resources i.e. `cpu` and `memory` of an Solr combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Enterprise and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Compute Resource Autoscaling Overview](/docs/guides/solr/autoscaler/compute/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in this [directory](/docs/examples/solr/autoscaler) of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Autoscaling of a Combined Cluster + +Here, we are going to deploy an `Solr` in combined cluster mode using a supported version by `KubeDB` operator. 
Then we are going to apply `SolrAutoscaler` to set up autoscaling. + +### Deploy Solr Combined + +In this section, we are going to deploy an Solr combined cluster with SolrVersion `9.6.1`. Then, in the next section, we will set up autoscaling for this database using `SolrAutoscaler` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Solr` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscalers/combined.yaml +solr.kubedb.com/solr-combined created +``` + +Now, wait until `es-combined` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.6.1 Ready 83s + +``` + +Let's check the Pod containers resources, + +```bash +$ kubectl get pod -n demo solr-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +Let's check the Solr resources, + +```bash +$ kubectl get solr -n demo solr-combined -o json | jq '.spec.podTemplate.spec.containers[] | select(.name == "solr") | .resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} + +``` + +You can see from the above outputs that the resources are the same as the ones we have assigned while deploying the Solr. + +We are now ready to apply the `SolrAutoscaler` CRO to set up autoscaling for this database. + +### Compute Resource Autoscaling + +Here, we are going to set up compute (ie. `cpu` and `memory`) autoscaling using an SolrAutoscaler Object. + +#### Create SolrAutoscaler Object + +To set up compute resource autoscaling for this combined cluster, we have to create a `SolrAutoscaler` CRO with our desired configuration. Below is the YAML of the `SolrAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-node-autoscaler + namespace: demo +spec: + databaseRef: + name: solr-combined + opsRequestOptions: + timeout: 5m + apply: IfReady + compute: + node: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 5 + minAllowed: + cpu: 1 + memory: 2Gi + maxAllowed: + cpu: 2 + memory: 3Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing compute resource autoscaling on `solr-combined` database. +- `spec.compute.node.trigger` specifies that compute resource autoscaling is enabled for this cluster. +- `spec.compute.node.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. +- `spec.compute.node.minAllowed` specifies the minimum allowed resources for the Solr node. +- `spec.compute.node.maxAllowed` specifies the maximum allowed resources for the Solr node. +- `spec.compute.node.controlledResources` specifies the resources that are controlled by the autoscaler. +- `spec.compute.node.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. 
+ If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. +- `spec.compute.node.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". +- - `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 2 fields. Know more about them here : [timeout](/docs/guides/solr/concepts/solropsrequests.md#spectimeout), [apply](/docs/guides/solr/concepts/solropsrequests.md#specapply). + +Let's create the `SolrAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/compute/combined-scaler.yaml +solrautoscaler.autoscaling.kubedb.com/sl-node-autoscaler created +``` + +#### Verify Autoscaling is set up successfully + +Let's check that the `Solrautoscaler` resource is created successfully, + +```bash +$ kubectl get solrautoscaler -n demo +NAME AGE +sl-node-autoscaler 100s + +$ kubectl describe solrautoscaler -n demo sl-node-autoscaler +Name: sl-node-autoscaler +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: SolrAutoscaler +Metadata: + Creation Timestamp: 2024-10-29T12:29:57Z + Generation: 1 + Owner References: + API Version: kubedb.com/v1alpha2 + Block Owner Deletion: true + Controller: true + Kind: Solr + Name: solr-combined + UID: 422bb16e-2181-4ce3-9401-a4feef853b4e + Resource Version: 883971 + UID: 33bf5f3b-c6ad-4234-8119-fccebcb8d4b6 +Spec: + Compute: + Node: + Container Controlled Values: RequestsAndLimits + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 2 + Memory: 3Gi + Min Allowed: + Cpu: 1 + Memory: 2Gi + Pod Life Time Threshold: 5m + Resource Diff Percentage: 5 + Trigger: On + Database Ref: + Name: solr-combined + Ops Request Options: + Apply: IfReady + Timeout: 5m +Status: + Checkpoints: + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 7649 + Index: 1 + Weight: 7457 + Index: 3 + Weight: 10000 + Index: 4 + Weight: 9817 + Reference Timestamp: 2024-10-29T12:30:00Z + Total Weight: 0.44924508934014207 + First Sample Start: 2024-10-29T12:29:42Z + Last Sample Start: 2024-10-29T12:31:49Z + Last Update Time: 2024-10-29T12:32:05Z + Memory Histogram: + Reference Timestamp: 2024-10-29T12:35:00Z + Ref: + Container Name: solr + Vpa Object Name: solr-combined + Total Samples Count: 4 + Version: v3 + Conditions: + Last Transition Time: 2024-10-29T12:30:35Z + Message: Successfully created solrOpsRequest demo/slops-solr-combined-04xbzd + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest + Vpas: + Conditions: + Last Transition Time: 2024-10-29T12:30:05Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: solr + Lower Bound: + Cpu: 1 + Memory: 2Gi + Target: + Cpu: 1 + Memory: 2Gi + Uncapped Target: + Cpu: 100m + Memory: 1555165137 + Upper Bound: + Cpu: 2 + Memory: 3Gi + Vpa Name: solr-combined +Events: +``` + +So, the `Solrautoscaler` resource is created successfully. + +you can see in the `Status.VPAs.Recommendation section`, that recommendation has been generated for our database. Our autoscaler operator continuously watches the recommendation generated and creates an `solropsrequest` based on the recommendations, if the database pods are needed to scaled up or down. + +Let's watch the `solropsrequest` in the demo namespace to see if any `solropsrequest` object is created. 
After some time you'll see that an `Solropsrequest` will be created based on the recommendation. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-combined-04xbzd VerticalScaling Progressing 2m24s +``` + +Let's wait for the opsRequest to become successful. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-combined-04xbzd VerticalScaling Successful 2m24s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the database. + +```bash +$ kubectl describe slops -n demo slops-solr-combined-04xbzd +Name: slops-solr-combined-04xbzd +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-combined + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-10-29T12:30:35Z + Generation: 1 + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: SolrAutoscaler + Name: sl-node-autoscaler + UID: 33bf5f3b-c6ad-4234-8119-fccebcb8d4b6 + Resource Version: 883905 + UID: 709d9b24-cd19-4605-bb41-92d099758ec0 +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Timeout: 5m0s + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 1 + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2024-10-29T12:30:35Z + Message: Solr ops-request has started to vertically scaling the Solr nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-29T12:30:38Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-29T12:31:23Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-10-29T12:30:43Z + Message: get pod; ConditionStatus:True; PodName:solr-combined-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-combined-0 + Last Transition Time: 2024-10-29T12:30:43Z + Message: evict pod; ConditionStatus:True; PodName:solr-combined-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-combined-0 + Last Transition Time: 2024-10-29T12:30:48Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-10-29T12:31:03Z + Message: get pod; ConditionStatus:True; PodName:solr-combined-1 + Observed Generation: 1 + Status: True + Type: GetPod--solr-combined-1 + Last Transition Time: 2024-10-29T12:31:03Z + Message: evict pod; ConditionStatus:True; PodName:solr-combined-1 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-combined-1 + Last Transition Time: 2024-10-29T12:31:23Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m35s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-solr-combined-04xbzd + Normal Starting 3m35s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-combined + Normal Successful 
3m35s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-combined for SolrOpsRequest: slops-solr-combined-04xbzd + Normal UpdatePetSets 3m32s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:solr-combined-0 3m27s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-combined-0 + Warning evict pod; ConditionStatus:True; PodName:solr-combined-0 3m27s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-combined-0 + Warning running pod; ConditionStatus:False 3m22s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-combined-1 3m7s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-combined-1 + Warning evict pod; ConditionStatus:True; PodName:solr-combined-1 3m7s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-combined-1 + Normal RestartPods 2m47s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 2m47s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-combined + Normal Successful 2m47s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-combined for SolrOpsRequest: slops-solr-combined-04xbzd +``` + +Now, we are going to verify from the Pod, and the Solr YAML whether the resources of the standalone database has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo solr-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "1", + "memory": "2Gi" + } +} + +$ kubectl get solr -n demo solr-combined -o json | jq '.spec.podTemplate.spec.containers[] | select(.name == "solr") | .resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "1", + "memory": "2Gi" + } +} + +``` + +The above output verifies that we have successfully auto-scaled the resources of the Solr standalone database. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete sl -n demo solr-combined +$ kubectl delete solrautoscaler -n demo sl-node-autoscaler +$ kubectl delete ns demo +``` diff --git a/docs/guides/solr/autoscaler/compute/overview.md b/docs/guides/solr/autoscaler/compute/overview.md new file mode 100644 index 0000000000..c973e54102 --- /dev/null +++ b/docs/guides/solr/autoscaler/compute/overview.md @@ -0,0 +1,53 @@ +--- +title: Solr Compute Autoscaling Overview +menu: + docs_{{ .version }}: + identifier: sl-computer-autoscaling-overview + name: Overview + parent: sl-compute-autoscaling-solr + weight: 5 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Compute Resource Autoscaling + +This guide will give an overview on how the KubeDB Autoscaler operator autoscales the database compute resources i.e. `cpu` and `memory` using `Solrautoscaler` crd. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + +## How Compute Autoscaling Works + +The Auto Scaling process consists of the following steps: + +

+  Figure: Compute Autoscaling Flow +

+ +1. At first, a user creates a `Solr` Custom Resource Object (CRO). + +2. `KubeDB` Provisioner operator watches the `Solr` CRO. + +3. When the operator finds a `Solr` CRO, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to set up autoscaling of the various components of the `Solr` database the user creates a `SolrAutoscaler` CRO with desired configuration. + +5. `KubeDB` Autoscaler operator watches the `SolrAutoscaler` CRO. + +6. `KubeDB` Autoscaler operator generates recommendation using the modified version of kubernetes [official recommender](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler/pkg/recommender) for different components of the database, as specified in the `SolrAutoscaler` CRO. + +7. If the generated recommendation doesn't match the current resources of the database, then `KubeDB` Autoscaler operator creates a `SolrOpsRequest` CRO to scale the database to match the recommendation generated. + +8. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CRO. + +9. Then the `KubeDB` Ops-manager operator will scale the database component vertically as specified on the `SolrOpsRequest` CRO. + +In the next docs, we are going to show a step-by-step guide on Autoscaling of various Solr database components using `SolrAutoscaler` CRD. + diff --git a/docs/guides/solr/autoscaler/compute/topology.md b/docs/guides/solr/autoscaler/compute/topology.md new file mode 100644 index 0000000000..e7d528e910 --- /dev/null +++ b/docs/guides/solr/autoscaler/compute/topology.md @@ -0,0 +1,485 @@ +--- +title: Solr Topology Cluster Autoscaling +menu: + docs_{{ .version }}: + identifier: sl-auto-scaling-topology + name: Topology Cluster + parent: sl-compute-autoscaling-solr + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Autoscaling the Compute Resource of an Solr Topology Cluster + +This guide will show you how to use `KubeDB` to autoscale compute resources i.e. `cpu` and `memory` of an Solr topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Enterprise and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Compute Resource Autoscaling Overview](/docs/guides/solr/autoscaler/compute/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in this [directory](/docs/examples/solr/autoscaler/compute) of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Autoscaling of Topology Cluster + +Here, we are going to deploy an `Solr` topology cluster using a supported version by `KubeDB` operator. Then we are going to apply `SolrAutoscaler` to set up autoscaling. 
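+The manifests in this guide refer to a ZooKeeper cluster named `zoo` in the `demo` namespace through `spec.zookeeperRef`. If you do not have one running yet, a minimal KubeDB `ZooKeeper` object similar to the following can be used; the version and storage class below are placeholders, so pick ones available in your cluster:
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: ZooKeeper
+metadata:
+  name: zoo
+  namespace: demo
+spec:
+  version: 3.8.3   # assumption: use any ZooKeeperVersion supported by your KubeDB installation
+  replicas: 3
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 1Gi
+```
+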
+ +#### Deploy Solr Topology Cluster + +In this section, we are going to deploy an Solr topology with SolrVersion `9.4.1`. Then, in the next section we will set up autoscaling for this database using `SolrAutoscaler` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Solr` CRD we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/topology.yaml +solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 82s +``` + +Let's check an data node containers resources, + +```bash +$ kubectl get pod -n demo solr-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +Let's check the Solr CR for the data node resources, + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.data.podTemplate.spec.containers[0].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +You can see from the above outputs that the resources are the same as the ones we have assigned while deploying the Solr. + +We are now ready to apply the `SolrAutoscaler` CRO to set up autoscaling for this database. + +### Compute Resource Autoscaling + +Here, we are going to set up compute resource autoscaling using a SolrAutoscaler Object. + +#### Create SolrAutoscaler Object + +In order to set up compute resource autoscaling for the data nodes of the cluster, we have to create a `SolrAutoscaler` CRO with our desired configuration. Below is the YAML of the `SolrAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-data-autoscaler + namespace: demo +spec: + databaseRef: + name: solr-cluster + opsRequestOptions: + timeout: 5m + apply: IfReady + compute: + data: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 5 + minAllowed: + cpu: 1 + memory: 2.5Gi + maxAllowed: + cpu: 2 + memory: 3Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing compute resource scaling operation on `es-topology` cluster. +- `spec.compute.topology.data.trigger` specifies that compute autoscaling is enabled for the data nodes. +- `spec.compute.topology.data.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. +- `spec.compute.topology.data.minAllowed` specifies the minimum allowed resources for the data nodes. +- `spec.compute.topology.data.maxAllowed` specifies the maximum allowed resources for the data nodes. 
+- `spec.compute.topology.data.controlledResources` specifies the resources that are controlled by the autoscaler. + +> Note: In this demo, we are only setting up the autoscaling for the data nodes, that's why we only specified the data section of the autoscaler. You can enable autoscaling for the master and the data nodes in the same YAML, by specifying the `topology.master` and `topology.data` section, similar to the `topology.data` section we have configured in this demo. + +Let's create the `SolrAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/compute/topology-scaler.yaml +solrautoscaler.autoscaling.kubedb.com/sl-data-autoscaler created +``` + +#### Verify Autoscaling is set up successfully + +Let's check that the `Solrautoscaler` resource is created successfully, + +```bash +$ kubectl get solrautoscaler -n demo +NAME AGE +sl-data-autoscaler 94s + +$ kubectl describe solrautoscaler -n demo sl-data-autoscaler +Name: sl-data-autoscaler +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: SolrAutoscaler +Metadata: + Creation Timestamp: 2024-10-29T13:39:52Z + Generation: 1 + Owner References: + API Version: kubedb.com/v1alpha2 + Block Owner Deletion: true + Controller: true + Kind: Solr + Name: solr-cluster + UID: b3fbf8b2-a05e-4502-be16-34b27b92a3ae + Resource Version: 890538 + UID: 6e95495b-2edf-4426-afa5-8de713bc3b2e +Spec: + Compute: + Data: + Container Controlled Values: RequestsAndLimits + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 2 + Memory: 3Gi + Min Allowed: + Cpu: 1 + Memory: 2.5Gi + Pod Life Time Threshold: 5m + Resource Diff Percentage: 5 + Trigger: On + Overseer: + Container Controlled Values: RequestsAndLimits + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 2 + Memory: 3Gi + Min Allowed: + Cpu: 1 + Memory: 2.5Gi + Pod Life Time Threshold: 5m + Resource Diff Percentage: 5 + Trigger: On + Database Ref: + Name: solr-cluster + Ops Request Options: + Apply: IfReady + Timeout: 5m +Status: + Checkpoints: + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 10000 + Reference Timestamp: 2024-10-29T13:40:00Z + Total Weight: 0.09749048557222403 + First Sample Start: 2024-10-29T13:39:49Z + Last Sample Start: 2024-10-29T13:39:49Z + Last Update Time: 2024-10-29T13:40:07Z + Memory Histogram: + Reference Timestamp: 2024-10-29T13:45:00Z + Ref: + Container Name: solr + Vpa Object Name: solr-cluster-data + Total Samples Count: 1 + Version: v3 + Conditions: + Last Transition Time: 2024-10-29T13:40:35Z + Message: Successfully created solrOpsRequest demo/slops-solr-cluster-data-n3vjgi + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest + Vpas: + Conditions: + Last Transition Time: 2024-10-29T13:40:07Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: solr + Lower Bound: + Cpu: 1 + Memory: 2560Mi + Target: + Cpu: 1 + Memory: 2560Mi + Uncapped Target: + Cpu: 100m + Memory: 2162292018 + Upper Bound: + Cpu: 2 + Memory: 3Gi + Vpa Name: solr-cluster-data + Vpa Name: solr-cluster-overseer +Events: + +``` + +So, the `solrautoscaler` resource is created successfully. + + +As you can see from the output the vpa has generated a recommendation for the data node of the Solr cluster. 
Our autoscaler operator continuously watches the recommendation generated and creates an `Solropsrequest` based on the recommendations, if the Solr nodes are needed to be scaled up or down. + +Let's watch the `solropsrequest` in the demo namespace to see if any `solropsrequest` object is created. After some time you'll see that an `Solropsrequest` will be created based on the recommendation. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-cluster-data-n3vjgi VerticalScaling Progressing 2m7s +``` + +Let's wait for the opsRequest to become successful. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-cluster-data-n3vjgi VerticalScaling Successful 2m38s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the database. + +```bash +$ kubectl describe slops -n demo slops-solr-cluster-data-n3vjgi +Name: slops-solr-cluster-data-n3vjgi +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-10-29T13:40:35Z + Generation: 1 + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: SolrAutoscaler + Name: sl-data-autoscaler + UID: 6e95495b-2edf-4426-afa5-8de713bc3b2e + Resource Version: 891009 + UID: e641e2cb-a3cd-4f34-8664-2d4a9079a93c +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Timeout: 5m0s + Type: VerticalScaling + Vertical Scaling: + Data: + Resources: + Limits: + Memory: 2560Mi + Requests: + Cpu: 1 + Memory: 2560Mi +Status: + Conditions: + Last Transition Time: 2024-10-29T13:40:35Z + Message: Solr ops-request has started to vertically scaling the Solr nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-29T13:40:38Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-29T13:43:13Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-10-29T13:40:43Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-10-29T13:40:43Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-10-29T13:40:48Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-10-29T13:41:28Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-10-29T13:41:28Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-10-29T13:42:23Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: 
GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-10-29T13:42:23Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-10-29T13:43:13Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m10s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-solr-cluster-data-n3vjgi + Normal Starting 3m10s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 3m10s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-solr-cluster-data-n3vjgi + Normal UpdatePetSets 3m7s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m2s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m2s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning running pod; ConditionStatus:False 2m57s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-cluster-data-0 2m17s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 2m17s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 82s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 82s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Normal RestartPods 32s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 32s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 32s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-solr-cluster-data-n3vjgi +``` + +Now, we are going to verify from the Pod, and the Solr YAML whether the resources of the data node of the cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo solr-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} + +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.data.podTemplate.spec.containers[0].resources' +{ + "limits": { + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} + +``` + +The above output verifies that we have successfully auto-scaled the resources of the Solr topology cluster. 
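If you want to keep the `SolrAutoscaler` object around but temporarily stop it from generating new ops requests (for example, while investigating a workload spike), you can flip the `trigger` field. A minimal sketch, assuming `Off` is the counterpart of the `On` value used above:

```bash
# Temporarily disable compute autoscaling for the data nodes.
$ kubectl patch solrautoscaler -n demo sl-data-autoscaler --type merge \
    -p '{"spec":{"compute":{"data":{"trigger":"Off"}}}}'

# Turn it back on the same way when you are done.
$ kubectl patch solrautoscaler -n demo sl-data-autoscaler --type merge \
    -p '{"spec":{"compute":{"data":{"trigger":"On"}}}}'
```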
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete solr -n demo solr-cluster +$ kubectl delete solrautoscaler -n demo sl-data-autoscaler +$ kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/guides/solr/autoscaler/storage/_index.md b/docs/guides/solr/autoscaler/storage/_index.md new file mode 100644 index 0000000000..50d564d997 --- /dev/null +++ b/docs/guides/solr/autoscaler/storage/_index.md @@ -0,0 +1,10 @@ +--- +title: Solr Storage Autoscaling +menu: + docs_{{ .version }}: + identifier: sl-storage-autoscaling-solr + name: Solr Storage Autoscaling + parent: sl-autoscaling-solr + weight: 14 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/autoscaler/storage/combined.md b/docs/guides/solr/autoscaler/storage/combined.md new file mode 100644 index 0000000000..f2e3b979e3 --- /dev/null +++ b/docs/guides/solr/autoscaler/storage/combined.md @@ -0,0 +1,412 @@ +--- +title: Solr Storage Autoscaling Combined +menu: + docs_{{ .version }}: + identifier: sl-storage-autoscaling-combined + name: Solr Combined Autoscaling + parent: sl-storage-autoscaling-solr + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Storage Autoscaling of Solr Combined Cluster + +This guide will show you how to use `KubeDB` to autoscale the storage of an Solr combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Enterprise and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +- You must have a `StorageClass` that supports volume expansion. + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Storage Autoscaling Overview](/docs/guides/solr/autoscaler/storage/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in this [directory](/docs/examples/solr/autoscaler/storage) of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Storage Autoscaling of Combined cluster + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 11d +longhorn (default) driver.longhorn.io Delete Immediate true 7d21h +longhorn-static driver.longhorn.io Delete Immediate true 7d21h +``` + +We can see from the output the `longhorn` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. You can install longhorn from [here](https://longhorn.io/docs/1.7.2/deploy/install/install-with-kubectl/) + +Now, we are going to deploy a `Solr` combined cluster using a supported version by the `KubeDB` operator. 
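You can check which Solr versions are supported by your installation from the `SolrVersion` catalog; the version used below (`9.6.1`) should appear in this list.

```bash
# List the Solr versions available in the KubeDB catalog.
$ kubectl get solrversion
```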
Then we are going to apply `SolrAutoscaler` to set up autoscaling.

#### Deploy Solr Combined Cluster

In this section, we are going to deploy a Solr combined cluster with version `9.6.1`. Then, in the next section, we will set up autoscaling for this database using the `SolrAutoscaler` CRD. Below is the YAML of the `Solr` CR that we are going to create,

```yaml
apiVersion: kubedb.com/v1alpha2
kind: Solr
metadata:
  name: solr-combined
  namespace: demo
spec:
  version: 9.6.1
  replicas: 2
  zookeeperRef:
    name: zoo
    namespace: demo
  storage:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
    storageClassName: longhorn
```

Let's create the `Solr` CR we have shown above,

```bash
$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/combined.yaml
solr.kubedb.com/solr-combined created
```

Now, wait until `solr-combined` has status `Ready`, i.e.,

```bash
$ kubectl get sl -n demo
NAME            TYPE                  VERSION   STATUS   AGE
solr-combined   kubedb.com/v1alpha2   9.6.1     Ready    17m
```

Let's check the volume size from the PetSet and from the persistent volumes,

```bash
$ kubectl get petset -n demo solr-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources'
{
  "requests": {
    "storage": "1Gi"
  }
}

$ kubectl get pv -n demo
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                      STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pvc-ceee299c-5c50-4f5c-83d5-97e2423bf286   7332Mi     RWO            Delete           Bound    demo/solr-combined-data-solr-combined-1    longhorn                                        19m
pvc-d9c2f7c1-7c27-48bd-a87e-cb1935cc2e61   7332Mi     RWO            Delete           Bound    demo/solr-combined-data-solr-combined-0    longhorn                                        19m
```

You can see that the PetSet's volume claim template requests `1Gi` of storage for each node.

We are now ready to apply the `SolrAutoscaler` CRO to set up storage autoscaling for this database.

### Storage Autoscaling

Here, we are going to set up storage autoscaling using a `SolrAutoscaler` object.

#### Create SolrAutoscaler Object

To set up storage autoscaling for the combined cluster nodes, we have to create a `SolrAutoscaler` CRO with our desired configuration. Below is the YAML of the `SolrAutoscaler` object that we are going to create,

```yaml
apiVersion: autoscaling.kubedb.com/v1alpha1
kind: SolrAutoscaler
metadata:
  name: sl-storage-autoscaler-combined
  namespace: demo
spec:
  databaseRef:
    name: solr-combined
  storage:
    node:
      expansionMode: "Offline"
      trigger: "On"
      usageThreshold: 60
      scalingThreshold: 100
```

Here,

- `spec.databaseRef.name` specifies that we are setting up storage autoscaling for the `solr-combined` cluster.
- `spec.storage.node.trigger` specifies that storage autoscaling is enabled for the Solr nodes.
- `spec.storage.node.usageThreshold` specifies the storage usage threshold; if storage usage exceeds `60%`, storage autoscaling will be triggered.
- `spec.storage.node.scalingThreshold` specifies the scaling threshold; the storage will be expanded by `100%` of the current amount.
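To make the two thresholds concrete, here is a rough sketch of the expansion math, assuming `scalingThreshold` means "grow by this percentage of the current size" and that the current size is exactly the requested `1Gi` (the size the operator actually requests also depends on the capacity reported by the storage provisioner):

```bash
# Rough sketch of the expansion math for usageThreshold=60, scalingThreshold=100.
current_bytes=$((1 * 1024 * 1024 * 1024))   # current request: 1Gi
scaling_threshold=100                        # grow by 100% of the current size
new_bytes=$((current_bytes + current_bytes * scaling_threshold / 100))
echo "${new_bytes}"                          # 2147483648 bytes, i.e. roughly 2Gi
```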
+ +Let's create the `SolrAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/storage/combined-scaler.yaml +solrautoscaler.autoscaling.kubedb.com/sl-storage-autoscaler-combined created +``` + +#### Storage Autoscaling is set up successfully + +Let's check that the `Solrautoscaler` resource is created successfully, + +```bash +$ kubectl get solrautoscaler -n demo +NAME AGE +sl-storage-autoscaler-combined 20m + + +$ kubectl describe solrautoscaler -n demo sl-storage-autoscaler-combined +Name: sl-storage-autoscaler-combined +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: SolrAutoscaler +Metadata: + Creation Timestamp: 2024-10-30T05:57:51Z + Generation: 1 + Owner References: + API Version: kubedb.com/v1alpha2 + Block Owner Deletion: true + Controller: true + Kind: Solr + Name: solr-combined + UID: 2f180d2f-27ef-4f94-8563-83fc0ae2bf66 + Resource Version: 971668 + UID: f2d12f05-790a-40ad-97b1-28f890a45dd7 +Spec: + Database Ref: + Name: solr-combined + Ops Request Options: + Apply: IfReady + Storage: + Node: + Expansion Mode: Offline + Scaling Rules: + Applies Upto: + Threshold: 100pc + Scaling Threshold: 100 + Trigger: On + Usage Threshold: 10 +Status: + Conditions: + Last Transition Time: 2024-10-30T06:11:43Z + Message: Successfully created solrOpsRequest demo/slops-solr-combined-gzqvx7 + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest +Events: +``` + +So, the `solrautoscaler` resource is created successfully. + +Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. + +Let's exec into the database pod and fill the database volume using the following commands: + +```bash +$ kubectl exec -it -n demo solr-combined-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-combined-0:/opt/solr-9.6.1$ df -h /var/solr/data +Filesystem Size Used Avail Use% Mounted on +/dev/longhorn/pvc-d9c2f7c1-7c27-48bd-a87e-cb1935cc2e61 7.1G 601M 6.5G 9% /var/solr/data + +[root@es-combined-0 Solr]# dd if=/dev/zero of=/var/solrdata/file.img bs=300M count=2 +1+0 records in +1+0 records out +629145600 bytes (629 MB) copied, 1.95767 s, 321 MB/s + +[root@es-combined-0 Solr]# df -h /usr/share/Solr/data +Filesystem Size Used Avail Use% Mounted on +/dev/longhorn/pvc-d9c2f7c1-7c27-48bd-a87e-cb1935cc2e61 7.1G 601M 6.5G 63% /var/solr/data +``` + +So, from the above output, we can see that the storage usage is 64%, which exceeded the `usageThreshold` 60%. + +Let's watch the `solropsrequest` in the demo namespace to see if any `solropsrequest` object is created. After some time you'll see that a `Solropsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-combined-gzqvx7 VolumeExpansion Progressing 9m42s +``` + +Let's wait for the opsRequest to become successful. + +```bash +$ kubectl get esops -n demo +NAME TYPE STATUS AGE +slops-solr-combined-gzqvx7 VolumeExpansion Successful 19m +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. 
+ +```bash +$ kubectl describe slops -n demo slops-solr-combined-gzqvx7 +Name: slops-solr-combined-gzqvx7 +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-combined + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-10-30T06:11:43Z + Generation: 1 + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: SolrAutoscaler + Name: sl-storage-autoscaler-combined + UID: f2d12f05-790a-40ad-97b1-28f890a45dd7 + Resource Version: 972599 + UID: 662a363f-9ce9-4c93-b2eb-8200c23978f6 +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Type: VolumeExpansion + Volume Expansion: + Mode: Offline + Node: 7687602176 +Status: + Conditions: + Last Transition Time: 2024-10-30T06:11:43Z + Message: Solr ops-request has started to expand volume of solr nodes. + Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-10-30T06:12:01Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-10-30T06:11:51Z + Message: get petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetset + Last Transition Time: 2024-10-30T06:11:51Z + Message: delete petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePetset + Last Transition Time: 2024-10-30T06:16:26Z + Message: successfully updated combined node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionCombinedNode + Status: True + Type: VolumeExpansionCombinedNode + Last Transition Time: 2024-10-30T06:12:06Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-10-30T06:12:06Z + Message: patch ops request; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsRequest + Last Transition Time: 2024-10-30T06:12:06Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2024-10-30T06:12:11Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-10-30T06:12:11Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2024-10-30T06:16:06Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-10-30T06:14:01Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-10-30T06:16:11Z + Message: running solr; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningSolr + Last Transition Time: 2024-10-30T06:16:31Z + Message: successfully reconciled the Solr resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-30T06:16:36Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-10-30T06:16:36Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 
2024-10-30T06:16:36Z
    Message:               Successfully completed volumeExpansion for Solr
    Observed Generation:   1
    Reason:                Successful
    Status:                True
    Type:                  Successful
  Observed Generation:     1
  Phase:                   Successful
Events:
```

Now, let's verify from the `PetSet` and the `PersistentVolume` whether the volume of the combined cluster has expanded to meet the desired state,

```bash
$ kubectl get petset -n demo solr-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources'
{
  "requests": {
    "storage": "7687602176"
  }
}

$ kubectl get pv -n demo
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                      STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pvc-ceee299c-5c50-4f5c-83d5-97e2423bf286   7332Mi     RWO            Delete           Bound    demo/solr-combined-data-solr-combined-1    longhorn                                        26m
pvc-d9c2f7c1-7c27-48bd-a87e-cb1935cc2e61   7332Mi     RWO            Delete           Bound    demo/solr-combined-data-solr-combined-0    longhorn
```

The above output verifies that we have successfully autoscaled the volume of the Solr combined cluster.

## Cleaning Up

To clean up the Kubernetes resources created by this tutorial, run:

```bash
$ kubectl delete solr -n demo solr-combined
$ kubectl delete solrautoscaler -n demo sl-storage-autoscaler-combined
```
diff --git a/docs/guides/solr/autoscaler/storage/overview.md b/docs/guides/solr/autoscaler/storage/overview.md
new file mode 100644
index 0000000000..02be157a14
--- /dev/null
+++ b/docs/guides/solr/autoscaler/storage/overview.md
@@ -0,0 +1,53 @@
---
title: Solr Storage Autoscaling Overview
menu:
  docs_{{ .version }}:
    identifier: sl-storage-autoscaling-overview
    name: Overview
    parent: sl-storage-autoscaling-solr
    weight: 5
menu_name: docs_{{ .version }}
section_menu_id: guides
---

> New to KubeDB? Please start [here](/docs/README.md).

# Solr Storage Autoscaling

This guide gives an overview of how the KubeDB Autoscaler operator autoscales Solr storage using the `SolrAutoscaler` CRD.

## Before You Begin

- You should be familiar with the following `KubeDB` concepts:
  - [Solr](/docs/guides/solr/concepts/solr.md)
  - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md)
  - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md)

## How Storage Autoscaling Works

The Auto Scaling process consists of the following steps:

Fig: Storage Autoscaling process of Solr

+ +1. At first, a user creates a `Solr` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Solr` CR. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +- Each PetSet creates a Persistent Volume according to the Volume Claim Template provided in the petset configuration. + +4. Then, in order to set up storage autoscaling of the various components of the `Solr` database the user creates a `SolrAutoscaler` CRO with desired configuration. + +5. `KubeDB` Autoscaler operator watches the `SolrAutoscaler` CRO. + +6. `KubeDB` Autoscaler operator continuously watches persistent volumes of the databases to check if it exceeds the specified usage threshold. +- If the usage exceeds the specified usage threshold, then `KubeDB` Autoscaler operator creates a `SolrOpsRequest` to expand the storage of the database. + +7. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CRO. + +8. Then the `KubeDB` Ops-manager operator will expand the storage of the database component as specified on the `SolrOpsRequest` CRO. + +In the next docs, we are going to show a step-by-step guide on Autoscaling storage of various Solr database components using `SolrAutoscaler` CRD. diff --git a/docs/guides/solr/autoscaler/storage/topology.md b/docs/guides/solr/autoscaler/storage/topology.md new file mode 100644 index 0000000000..0e4c23d0d7 --- /dev/null +++ b/docs/guides/solr/autoscaler/storage/topology.md @@ -0,0 +1,445 @@ +--- +title: Solr Storage Autoscaling Topology +menu: + docs_{{ .version }}: + identifier: sl-storage-autoscaling-topology + name: Solr Topology Autoscaling + parent: sl-storage-autoscaling-solr + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Storage Autoscaling of Solr Topology Cluster + +This guide will show you how to use `KubeDB` to autoscale the storage of a solr topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Enterprise and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +- You must have a `StorageClass` that supports volume expansion. + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrAutoscaler](/docs/guides/solr/concepts/autoscaler.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Storage Autoscaling Overview](/docs/guides/solr/autoscaler/storage/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in this [directory](/docs/examples/solr/autoscaler/storage) of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Storage Autoscaling of Topology Cluster + +At first verify that your cluster has a storage class, that supports volume expansion. 
Let's check, + +```bash +$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 11d +longhorn (default) driver.longhorn.io Delete Immediate true 7d22h +longhorn-static driver.longhorn.io Delete Immediate true 7d22h + +``` + +We can see from the output the `longhorn` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. You can install topolvm from [here](https://github.com/topolvm/topolvm) + +Now, we are going to deploy a `Solr` topology cluster using a supported version by the `KubeDB` operator. Then we are going to apply `SolrAutoscaler` to set up autoscaling. + +#### Deploy Solr Topology + +In this section, we are going to deploy a Solr topology cluster with version `9.6.1`. Then, in the next section we will set up autoscaling for this database using `SolrAutoscaler` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.6.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +Let's create the `Solr` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/topology.yaml +Solr.kubedb.com/es-topology created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash + $ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.6.1 Ready 83s + +``` + +Let's check volume size from the data petset, and from the persistent volume, + +```bash +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources' +{ + "requests": { + "storage": "1Gi" + } +} + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-24431af2-8df5-4ad2-a6cd-795dcbdc6355 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-coordinator-0 longhorn 2m15s +pvc-5e3430da-545c-4234-a891-3385b100401d 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-overseer-0 longhorn 2m17s +pvc-aa75a15f-94cd-475a-a7ad-498023830020 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-data-0 longhorn 2m19s + +``` + +You can see that the data PetSet has 1GB storage, and the capacity of all the persistent volume is also 1GB. + +We are now ready to apply the `SolrAutoscaler` CRO to set up storage autoscaling for the data nodes. + +### Storage Autoscaling + +Here, we are going to set up storage autoscaling using an SolrAutoscaler Object. + +#### Create SolrAutoscaler Object + +To set up vertical autoscaling for this topology cluster, we have to create a `SolrAutoscaler` CRO with our desired configuration. 
Below is the YAML of the `SolrAutoscaler` object that we are going to create,

```yaml
apiVersion: autoscaling.kubedb.com/v1alpha1
kind: SolrAutoscaler
metadata:
  name: sl-storage-autoscaler-topology
  namespace: demo
spec:
  databaseRef:
    name: solr-cluster
  storage:
    data:
      expansionMode: "Offline"
      trigger: "On"
      usageThreshold: 60
      scalingThreshold: 100
```

Here,

- `spec.databaseRef.name` specifies that we are setting up storage autoscaling for the `solr-cluster` cluster.
- `spec.storage.data.trigger` specifies that storage autoscaling is enabled for the data nodes.
- `spec.storage.data.usageThreshold` specifies the storage usage threshold; if storage usage exceeds `60%`, storage autoscaling will be triggered.
- `spec.storage.data.scalingThreshold` specifies the scaling threshold; the storage will be expanded by `100%` of the current amount.

> Note: In this demo we are only setting up storage autoscaling for the data nodes, which is why we only specified the `data` section of the autoscaler. You can enable storage autoscaling for the overseer nodes in the same YAML by adding a `storage.overseer` section, configured the same way.

Let's create the `SolrAutoscaler` CR we have shown above,

```bash
$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/autoscaler/storage/topology-scaler.yaml
solrautoscaler.autoscaling.kubedb.com/sl-storage-autoscaler-topology created
```

#### Storage Autoscaling is set up successfully

Let's check that the `SolrAutoscaler` resource is created successfully,

```bash
$ kubectl get solrautoscaler -n demo
NAME                             AGE
sl-storage-autoscaler-topology   70s

$ kubectl describe solrautoscaler -n demo sl-storage-autoscaler-topology
Name:         sl-storage-autoscaler-topology
Namespace:    demo
Labels:
Annotations:
API Version:  autoscaling.kubedb.com/v1alpha1
Kind:         SolrAutoscaler
Metadata:
  Creation Timestamp:  2024-10-30T06:55:55Z
  Generation:          1
  Owner References:
    API Version:           kubedb.com/v1alpha2
    Block Owner Deletion:  true
    Controller:            true
    Kind:                  Solr
    Name:                  solr-cluster
    UID:                   0820762a-3b96-44db-8157-f1857bed410e
  Resource Version:        976749
  UID:                     8a5b2ca2-3fa1-4e22-9b4b-bde4a163aa08
Spec:
  Database Ref:
    Name:  solr-cluster
  Ops Request Options:
    Apply:  IfReady
  Storage:
    Data:
      Expansion Mode:  Offline
      Scaling Rules:
        Applies Upto:
        Threshold:        100pc
      Scaling Threshold:  100
      Trigger:            On
      Usage Threshold:    60
Events:
```

So, the `SolrAutoscaler` resource is created successfully.

Now, for this demo, we are going to manually fill up one of the persistent volumes with the `dd` command so that usage exceeds the `usageThreshold`, and see whether storage autoscaling kicks in.
+ +Let's exec into the data nodes and fill the database volume using the following commands: + +```bash + $ kubectl exec -it -n demo solr-cluster-data-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-combined-0:/opt/solr-9.6.1$ df -h /var/solr/data +Filesystem Size Used Avail Use% Mounted on +/dev/longhorn/pvc-d9c2f7c1-7c27-48bd-a87e-cb1935cc2e61 7.1G 601M 6.5G 9% /var/solr/data +solr@solr-cluster-data-0:/opt/solr-9.6.1$ dd if=/dev/zero of=/var/solr/data/file.img bs=300M count=2 +2+0 records in +2+0 records out +629145600 bytes (629 MB, 600 MiB) copied, 1.95395 s, 322 MB/s +solr@solr-cluster-data-0:/opt/solr-9.6.1$ df -h /var/solr/data +Filesystem Size Used Avail Use% Mounted on +/dev/longhorn/pvc-aa75a15f-94cd-475a-a7ad-498023830020 974M 601M 358M 63% /var/solr/data + +``` + +So, from the above output we can see that the storage usage is 69%, which exceeded the `usageThreshold` 60%. + +Let's watch the `solropsrequest` in the demo namespace to see if any `solropsrequest` object is created. After some time you'll see that an `solropsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-cluster-0s6kgw VolumeExpansion Progressing 95s +``` + +Let's wait for the opsRequest to become successful. + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-solr-cluster-0s6kgw VolumeExpansion Successful 2m58s +``` + +We can see from the above output that the `solrOpsRequest` has succeeded. If we describe the `solrOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. + +```bash +$ kubectl describe slops -n demo slops-solr-cluster-0s6kgw +Name: slops-solr-cluster-0s6kgw +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-10-30T06:58:43Z + Generation: 1 + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: SolrAutoscaler + Name: sl-storage-autoscaler-topology + UID: 8a5b2ca2-3fa1-4e22-9b4b-bde4a163aa08 + Resource Version: 977641 + UID: 5411ed48-b2fe-40a2-b1e4-2d3d659668b1 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Type: VolumeExpansion + Volume Expansion: + Data: 2041405440 + Mode: Offline +Status: + Conditions: + Last Transition Time: 2024-10-30T06:58:43Z + Message: Solr ops-request has started to expand volume of solr nodes. 
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-10-30T06:59:01Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-10-30T06:58:51Z + Message: get petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetset + Last Transition Time: 2024-10-30T06:58:51Z + Message: delete petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePetset + Last Transition Time: 2024-10-30T07:01:16Z + Message: successfully updated data node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionDataNode + Status: True + Type: VolumeExpansionDataNode + Last Transition Time: 2024-10-30T06:59:06Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-10-30T06:59:06Z + Message: patch ops request; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsRequest + Last Transition Time: 2024-10-30T06:59:06Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2024-10-30T06:59:11Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-10-30T06:59:11Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2024-10-30T07:00:56Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-10-30T07:00:56Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-10-30T07:01:01Z + Message: running solr; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningSolr + Last Transition Time: 2024-10-30T07:01:21Z + Message: successfully reconciled the Solr resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-30T07:01:26Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-10-30T07:01:26Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-10-30T07:01:26Z + Message: Successfully completed volumeExpansion for Solr + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m39s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-solr-cluster-0s6kgw + Normal Starting 3m39s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 3m39s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-solr-cluster-0s6kgw + Warning get petset; ConditionStatus:True 3m31s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning delete petset; ConditionStatus:True 3m31s KubeDB Ops-manager Operator delete petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 3m26s KubeDB Ops-manager Operator get petset; ConditionStatus:True + +``` + +Now, we are going to verify from the `Petset`, and 
the `Persistent Volume` whether the volume of the data nodes of the cluster has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources' +{ + "requests": { + "storage": "2041405440" + } +} + + +$ kubectl get pvc -n demo +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +solr-cluster-data-solr-cluster-coordinator-0 Bound pvc-24431af2-8df5-4ad2-a6cd-795dcbdc6355 1Gi RWO longhorn 18m +solr-cluster-data-solr-cluster-data-0 Bound pvc-aa75a15f-94cd-475a-a7ad-498023830020 1948Mi RWO longhorn 18m +solr-cluster-data-solr-cluster-overseer-0 Bound pvc-5e3430da-545c-4234-a891-3385b100401d 1Gi RWO longhorn 18m + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-24431af2-8df5-4ad2-a6cd-795dcbdc6355 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-coordinator-0 longhorn 18m +pvc-5e3430da-545c-4234-a891-3385b100401d 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-overseer-0 longhorn 18m +pvc-aa75a15f-94cd-475a-a7ad-498023830020 1948Mi RWO Delete Bound demo/solr-cluster-data-solr-cluster-data-0 longhorn 18m +``` + +The above output verifies that we have successfully autoscaler the volume of the data nodes of this Solr topology cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete Solr -n demo solr-cluster +$ kubectl delete solrautoscaler -n demo sl-storage-autoscaler-topology +``` diff --git a/docs/guides/solr/clustering/_index.md b/docs/guides/solr/clustering/_index.md new file mode 100644 index 0000000000..2ba74db20c --- /dev/null +++ b/docs/guides/solr/clustering/_index.md @@ -0,0 +1,10 @@ +--- +title: Solr Clustering +menu: + docs_{{ .version }}: + identifier: sl-clustering-solr + name: Clustering + parent: sl-solr-guides + weight: 16 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/clustering/combined_cluster.md b/docs/guides/solr/clustering/combined_cluster.md new file mode 100644 index 0000000000..0a60730cb0 --- /dev/null +++ b/docs/guides/solr/clustering/combined_cluster.md @@ -0,0 +1,342 @@ +--- +title: Combined Cluster +menu: + docs_{{ .version }}: + identifier: sl-combined-solr + name: Combined Cluster + parent: sl-clustering-solr + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Combined Cluster + +An Solr combined cluster is a group of one or more Solr nodes where each node can perform as overseer, data, and coordinator nodes simultaneously. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 9s +``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/solr/yamls) in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Create Standalone Solr Cluster + +The KubeDB Solr runs in `solrcloud` mode. Hence, it needs a external zookeeper to distribute replicas among pods and save configurations. + +We will use KubeDB ZooKeeper for this purpose. + +The ZooKeeper instance used for this tutorial: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ZooKeeper +metadata: + name: zoo-com + namespace: demo +spec: + version: 3.8.3 + replicas: 3 + deletionPolicy: Delete + adminServerPort: 8080 + storage: + resources: + requests: + storage: "100Mi" + storageClassName: standard + accessModes: + - ReadWriteOnce +``` + +We have to apply zookeeper first and wait till atleast pods are running to make sure that a cluster has been formed. + +Here, + +- `spec.version` - is the name of the ZooKeeperVersion CR. Here, a ZooKeeper of version `3.8.3` will be created. +- `spec.replicas` - specifies the number of ZooKeeper nodes. +- `spec.storageType` - specifies the type of storage that will be used for ZooKeeper database. It can be `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create the ZooKeeper database using `EmptyDir` volume. In this case, you don't have to specify `spec.storage` field. This is useful for testing purposes. +- `spec.storage` specifies the StorageClass of PVC dynamically allocated to store data for this database. This storage spec will be passed to the Petsets created by the KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. If you don't specify `spec.storageType: Ephemeral`, then this field is required. +- `spec.deletionPolicy` specifies what KubeDB should do when a user try to delete ZooKeeper CR. Deletion policy `Delete` will delete the database pods, secret and PVC when the ZooKeeper CR is deleted. Checkout the [link](/docs/guides/zookeeper/concepts/zookeeper.md#specdeletionpolicy) for details. + +> Note: `spec.storage` section is used to create PVC for database pod. It will create PVC with storage size specified in the `storage.resources.requests` field. Don't specify `limits` here. PVC does not get resized automatically. + +Let's create the ZooKeeper CR that is shown above: + +```bash +$ $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/quickstart/overview/yamls/zookeeper/zookeeper.yaml +zooKeeper.kubedb.com/zoo-com created +``` + +The ZooKeeper's `STATUS` will go from `Provisioning` to `Ready` state within few minutes. Once the `STATUS` is `Ready`, you are ready to use the database. + +```bash +$ kubectl get ZooKeeper -n demo -w +NAME TYPE VERSION STATUS AGE +zoo-com kubedb.com/v1alpha2 3.7.2 Ready 13m + +Here, we are going to create a standalone (ie. `replicas: 1`) Solr cluster. We will use the Solr image provided by the Solr (`9.6.1`) for this demo. To learn more about Solr CR, visit [here](/docs/guides/solr/concepts/solr.md). 
+ +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + deletionPolicy: DoNotTerminate + replicas: 2 + enableSSL: true + zookeeperRef: + name: zoo-com + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +Let's deploy the above example by the following command: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/clustering/yamls/combined-standalone.yaml +solr.kubedb.com/solr-combined created +``` + +Watch the bootstrap progress: + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.6.1 Ready 3h37m + +``` + +Hence the cluster is ready to use. +Let's check the k8s resources created by the operator on the deployment of Elasticsearch CRO: + +```bash +$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=solr-combined' +NAME READY STATUS RESTARTS AGE +pod/solr-combined-0 1/1 Running 0 75s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/solr-combined ClusterIP 10.96.247.33 8983/TCP 78s +service/solr-combined-pods ClusterIP None 8983/TCP 78s + +NAME TYPE VERSION AGE +appbinding.appcatalog.appscode.com/solr-combined kubedb.com/solr 9.6.1 78s + +NAME TYPE DATA AGE +secret/solr-combined-auth kubernetes.io/basic-auth 2 78s +secret/solr-combined-auth-config Opaque 1 78s +secret/solr-combined-client-cert kubernetes.io/tls 5 78s +secret/solr-combined-config Opaque 1 78s +secret/solr-combined-keystore-cred Opaque 1 78s +secret/solr-combined-server-cert kubernetes.io/tls 5 78s +secret/solr-combined-zk-digest kubernetes.io/basic-auth 2 78s +secret/solr-combined-zk-digest-readonly kubernetes.io/basic-auth 2 78s + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +persistentvolumeclaim/solr-combined-data-solr-combined-0 Bound pvc-c073b8b8-9005-41c5-ac21-bd060a5214a1 1Gi RWO standard 75s +``` + +- `PetSet` - a PetSet(Appscode manages customized petset) named after the Solr instance. In topology mode, the operator creates 3 PetSets with name `{Solr-Name}-{Sufix}`. +- `Services` - 2 services are generated for each Solr database. + - `{Solr-Name}` - the client service which is used to connect to the database. It points to the `overseer` nodes. + - `{Solr-Name}-pods` - the node discovery service which is used by the Solr nodes to communicate each other. It is a headless service. +- `AppBinding` - an [AppBinding](/docs/guides/solr/concepts/appbinding.md) which hold to connect information for the database. It is also named after the solr instance. +- `Secrets` - 3 types of secrets are generated for each Solr database. + - `{Solr-Name}-auth` - the auth secrets which hold the `username` and `password` for the solr users. The auth secret `solr-combined-admin-cred` holds the `username` and `password` for `admin` user which lets administrative access. + - `{Solr-Name}-config` - the default configuration secret created by the operator. + - `{Solr-Name}-auth-config` - the configuration secret of admin user information created by the operator. + - `{Solr-Name}-zk-digest` - the auth secret which contains the `username` and `password` for zookeeper digest secret which is able to access zookeeper data. + - `{Solr-Name}-zk-digest-readonly` - the auth secret which contains the `username` and `password` for zookeeper readonly digest secret which is able to read zookeeper data. 
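If you want to see the configuration the operator rendered for these nodes, you can decode the generated config secret. This is a sketch; it assumes the rendered `solr.xml` is stored under a key literally named `solr.xml`, which may differ between KubeDB versions:

```bash
# Peek at the operator-generated Solr configuration (key name assumed).
$ kubectl get secret -n demo solr-combined-config \
    -o jsonpath='{.data.solr\.xml}' | base64 -d | head
```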
+ + +## Connect with Solr Database + +We will use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to connect with our Solr database. Then we will use `curl` to send `HTTP` requests to check cluster health to verify that our Solr database is working well. + +Let's port-forward the port `8983` to local machine: + +```bash +$ kubectl port-forward -n demo svc/solr-combined 8983 +Forwarding from 127.0.0.1:8983 -> 8983 +Forwarding from [::1]:8983 -> 8983 +``` + +Now, our Solr cluster is accessible at `localhost:8983`. + +**Connection information:** + +- Address: `localhost:8983` +- Username: + + ```bash + $ kubectl get secret -n demo solr-combined-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo solr-combined-admin-cred -o jsonpath='{.data.password}' | base64 -d + Xy3ZjyU)~(9IO8_n + ``` + +Now let's check the health of our Solr database. + +```bash +$ curl -XGET -k -u 'admin:Xy3ZjyU)~(9IO8_n' "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS" +{ + "responseHeader":{ + "status":0, + "QTime":1 + }, + "cluster":{ + "collections":{ + "kubedb-collection":{ + "pullReplicas":"0", + "configName":"kubedb-system.AUTOCREATED", + "replicationFactor":1, + "router":{ + "name":"compositeId" + }, + "nrtReplicas":1, + "tlogReplicas":"0", + "shards":{ + "shard1":{ + "range":"80000000-7fffffff", + "state":"active", + "replicas":{ + "core_node2":{ + "core":"kubedb-system_shard1_replica_n1", + "node_name":"solr-combined-2.solr-combined-pods.demo:8983_solr", + "type":"NRT", + "state":"active", + "leader":"true", + "force_set_state":"false", + "base_url":"http://solr-combined-0.solr-combined-pods.demo:8983/solr" + } + }, + "health":"GREEN" + } + }, + "health":"GREEN", + "znodeVersion":4 + } + }, + "live_nodes":["solr-combined-0.solr-combined-pods.demo:8983_solr"] + } +} +``` + +## Create Multi-Node Combined Solr Cluster + +Here, we are going to create a multi-node (say `replicas: 2`) Solr cluster. We will use the Solr image provided by the Solr (`9.6.1`) for this demo. To learn more about Solr CR, visit [here](/docs/guides/solr/concepts/solr.md). + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + deletionPolicy: DoNotTerminate + replicas: 2 + enableSSL: true + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +Let's deploy the above example by the following command: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/clusteringyamls/combined-multinode.yaml +solr.kubedb.com/solr-combined created +``` + +Watch the bootstrap progress: + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.6.1 Ready 3h37m + +``` + +Hence the cluster is ready to use. 
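Before looking at the generated resources, you can confirm that both replicas joined the SolrCloud cluster by querying the Collections API again, as we did for the first cluster. A minimal sketch (the node names shown are illustrative):

```bash
# With `kubectl port-forward -n demo svc/solr-combined 8983` running in another terminal:
$ PASSWORD=$(kubectl get secret -n demo solr-combined-admin-cred -o jsonpath='{.data.password}' | base64 -d)
$ curl -sk -u "admin:$PASSWORD" \
    "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS" | jq '.cluster.live_nodes'
[
  "solr-combined-0.solr-combined-pods.demo:8983_solr",
  "solr-combined-1.solr-combined-pods.demo:8983_solr"
]
```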
+Let's check the k8s resources created by the operator on the deployment of Elasticsearch CRO: + +```bash +$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=solr-combined' +NAME READY STATUS RESTARTS AGE +pod/solr-combined-0 1/1 Running 0 75s +pod/solr-combined-1 1/1 Running 0 66s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/solr-combined ClusterIP 10.96.247.33 8983/TCP 78s +service/solr-combined-pods ClusterIP None 8983/TCP 78s + +NAME TYPE VERSION AGE +appbinding.appcatalog.appscode.com/solr-combined kubedb.com/solr 9.6.1 78s + +NAME TYPE DATA AGE +secret/solr-combined-auth kubernetes.io/basic-auth 2 78s +secret/solr-combined-auth-config Opaque 1 78s +secret/solr-combined-client-cert kubernetes.io/tls 5 78s +secret/solr-combined-config Opaque 1 78s +secret/solr-combined-keystore-cred Opaque 1 78s +secret/solr-combined-server-cert kubernetes.io/tls 5 78s +secret/solr-combined-zk-digest kubernetes.io/basic-auth 2 78s +secret/solr-combined-zk-digest-readonly kubernetes.io/basic-auth 2 78s + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +persistentvolumeclaim/solr-combined-data-solr-combined-0 Bound pvc-c073b8b8-9005-41c5-ac21-bd060a5214a1 1Gi RWO standard 75s +persistentvolumeclaim/solr-combined-data-solr-combined-1 Bound pvc-69b509b1-5e42-4b7e-a64e-1b8e15b25bc7 1Gi RWO standard 66s +``` + + + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl patch -n demo solr solr-combined -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" +solr.kubedb.com/solr-combined patched + +$ kubectl delete -n demo sl/solr-combined +solr.kubedb.com "solr-combined" deleted + +$ kubectl delete namespace demo +namespace "demo" deleted +``` diff --git a/docs/guides/solr/clustering/topology_cluster.md b/docs/guides/solr/clustering/topology_cluster.md new file mode 100644 index 0000000000..5abd15127f --- /dev/null +++ b/docs/guides/solr/clustering/topology_cluster.md @@ -0,0 +1,648 @@ +--- +title: Topology Cluster +menu: + docs_{{ .version }}: + identifier: sl-topology-solr + name: Topology Cluster + parent: sl-clustering-solr + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Simple Dedicated Cluster + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 7s +``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/solr/clustering/yamls) in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Find Available StorageClass + +We will have to provide `StorageClass` in Solr CR specification. 
Check available `StorageClass` in your cluster using the following command, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 1h +``` + +Here, we have `standard` StorageClass in our cluster from [Local Path Provisioner](https://github.com/rancher/local-path-provisioner). + +## Create Solr Topology Cluster + +We are going to create a Solr Cluster in topology mode. Our cluster will be composed of 1 overseer nodes, 2 data nodes, 1 coordinator nodes. Here, we are using Solr version ( `9.4.1` ). To learn more about the Solr CR, visit [here](/docs/guides/solr/concepts/solr.md). + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + name: self-signed-issuer + kind: ClusterIssuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + deletionPolicy: DoNotTerminate + version: 9.4.1 + zookeeperRef: + name: zoo-com + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + data: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +Here, + +- `spec.version` - is the name of the SolrVersion CR. Here, we are using Solr version `9.4.1`. +- `spec.enableSSL` - specifies whether the HTTP layer is secured with certificates or not. +- `spec.storageType` - specifies the type of storage that will be used for Solr database. It can be `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create the Solr database using `EmptyDir` volume. In this case, you don't have to specify `spec.storage` field. This is useful for testing purposes. +- `spec.topology` - specifies the node-specific properties for the Solr cluster. + - `topology.overseer` - specifies the properties of overseer nodes. + - `overseer.replicas` - specifies the number of overseer nodes. + - `overseer.storage` - specifies the overseer node storage information that passed to the PetSet. + - `topology.data` - specifies the properties of data nodes. + - `data.replicas` - specifies the number of data nodes. + - `data.storage` - specifies the data node storage information that passed to the PetSet. + - `topology.coordinator` - specifies the properties of coordinator nodes. + - `coordinator.replicas` - specifies the number of coordinator nodes. + - `coordinator.storage` - specifies the coordinator node storage information that passed to the PetSet. + +Let's deploy the above example by the following command: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/clustering/yamls/topology.yaml +solr.kubedb.com/solr-cluster created +``` +KubeDB will create the necessary resources to deploy the Solr cluster according to the above specification. Let’s wait until the database to be ready to use, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 3d2h +``` +Here, Solr is in `Ready` state. 
It means the database is ready to accept connections. + +Describe the Solr object to observe the progress if something goes wrong or the status is not changing for a long period of time: + +```bash +$ kubectl describe sl -n demo solr-cluster +Name: solr-cluster +Namespace: demo +Labels: +Annotations: +API Version: kubedb.com/v1alpha2 +Kind: Solr +Metadata: + Creation Timestamp: 2024-10-25T10:51:15Z + Finalizers: + kubedb.com + Generation: 4 + Resource Version: 439177 + UID: caefb440-1f25-4994-98c0-11fa7afca778 +Spec: + Auth Config Secret: + Name: solr-cluster-auth-config + Auth Secret: + Name: solr-cluster-admin-cred + Deletion Policy: Delete + Health Checker: + Failure Threshold: 3 + Period Seconds: 20 + Timeout Seconds: 10 + Pod Template: + Controller: + Metadata: + Spec: + Pod Placement Policy: + Name: default + Solr Modules: + s3-repository + gcs-repository + prometheus-exporter + Solr Opts: + -Daws.accessKeyId=local-identity + -Daws.secretAccessKey=local-credential + Storage Type: Durable + Topology: + Coordinator: + Pod Template: + Controller: + Metadata: + Spec: + Containers: + Name: solr + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 900m + Memory: 2Gi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Init Containers: + Name: init-solr + Resources: + Limits: + Memory: 512Mi + Requests: + Cpu: 200m + Memory: 512Mi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Pod Placement Policy: + Name: default + Security Context: + Fs Group: 8983 + Replicas: 1 + Storage: + Access Modes: + ReadWriteOnce + Resources: + Requests: + Storage: 1Gi + Storage Class Name: standard + Suffix: coordinator + Data: + Pod Template: + Controller: + Metadata: + Spec: + Containers: + Name: solr + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 900m + Memory: 2Gi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Init Containers: + Name: init-solr + Resources: + Limits: + Memory: 512Mi + Requests: + Cpu: 200m + Memory: 512Mi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Pod Placement Policy: + Name: default + Security Context: + Fs Group: 8983 + Replicas: 1 + Storage: + Access Modes: + ReadWriteOnce + Resources: + Requests: + Storage: 1Gi + Storage Class Name: standard + Suffix: data + Overseer: + Pod Template: + Controller: + Metadata: + Spec: + Containers: + Name: solr + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 900m + Memory: 2Gi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Init Containers: + Name: init-solr + Resources: + Limits: + Memory: 512Mi + Requests: + Cpu: 200m + Memory: 512Mi + Security Context: + Allow Privilege Escalation: false + Capabilities: + Drop: + ALL + Run As Non Root: true + Run As User: 8983 + Seccomp Profile: + Type: RuntimeDefault + Pod Placement Policy: + Name: default + Security Context: + Fs Group: 8983 + Replicas: 1 + Storage: + Access Modes: + ReadWriteOnce + Resources: + Requests: + Storage: 1Gi + Storage Class Name: standard + Suffix: 
overseer + Version: 9.4.1 + Zookeeper Digest Readonly Secret: + Name: solr-cluster-zk-digest-readonly + Zookeeper Digest Secret: + Name: solr-cluster-zk-digest + Zookeeper Ref: + Name: zoo + Namespace: demo +Status: + Conditions: + Last Transition Time: 2024-10-25T10:51:15Z + Message: The KubeDB operator has started the provisioning of Solr: demo/solr-cluster + Observed Generation: 1 + Reason: DatabaseProvisioningStartedSuccessfully + Status: True + Type: ProvisioningStarted + Last Transition Time: 2024-10-25T11:05:22Z + Message: All desired replicas are ready. + Observed Generation: 4 + Reason: AllReplicasReady + Status: True + Type: ReplicaReady + Last Transition Time: 2024-10-25T11:05:40Z + Message: The Solr: demo/solr-cluster is accepting connection + Observed Generation: 4 + Reason: DatabaseAcceptingConnectionRequest + Status: True + Type: AcceptingConnection + Last Transition Time: 2024-10-25T11:05:40Z + Message: The Solr: demo/solr-cluster is accepting write request. + Observed Generation: 4 + Reason: DatabaseWriteAccessCheckSucceeded + Status: True + Type: DatabaseWriteAccess + Last Transition Time: 2024-10-25T11:06:00Z + Message: The Solr: demo/solr-cluster is accepting read request. + Observed Generation: 4 + Reason: DatabaseReadAccessCheckSucceeded + Status: True + Type: DatabaseReadAccess + Last Transition Time: 2024-10-25T11:05:40Z + Message: The Solr: demo/solr-cluster is ready + Observed Generation: 4 + Reason: AllReplicasReady,AcceptingConnection,ReadinessCheckSucceeded,DatabaseWriteAccessCheckSucceeded + Status: True + Type: Ready + Last Transition Time: 2024-10-25T10:52:31Z + Message: The Solr: demo/solr-cluster is successfully provisioned. + Observed Generation: 1 + Reason: DatabaseSuccessfullyProvisioned + Status: True + Type: Provisioned + Phase: Ready +Events: +``` +- Here, in `Status.Conditions` + - `Conditions.Status` is `True` for the `Condition.Type:ProvisioningStarted` which means database provisioning has been started successfully. + - `Conditions.Status` is `True` for the `Condition.Type:ReplicaReady` which specifies all replicas are ready in the cluster. + - `Conditions.Status` is `True` for the `Condition.Type:AcceptingConnection` which means database has been accepting connection request. + - `Conditions.Status` is `True` for the `Condition.Type:Ready` which defines database is ready to use. + - `Conditions.Status` is `True` for the `Condition.Type:Provisioned` which specifies Database has been successfully provisioned. 
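+
+If you do not want to scan the whole `describe` output, the same conditions can be checked with standard `kubectl` commands; the short name `sl` and the condition types used below are taken from the output above, everything else is plain `kubectl`:
+
+```bash
+# Block until the operator reports the Ready condition (or time out after 10 minutes)
+$ kubectl wait --for=condition=Ready sl/solr-cluster -n demo --timeout=10m
+
+# Print only the status of the Ready condition from .status.conditions
+$ kubectl get sl -n demo solr-cluster \
+  -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
+```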
+
+### KubeDB Operator Generated Resources
+
+Let's check the Kubernetes resources created by the operator on the deployment of the Solr CRO:
+
+```bash
+$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=solr-cluster'
+NAME                             READY   STATUS    RESTARTS   AGE
+pod/solr-cluster-coordinator-0   1/1     Running   0          3d2h
+pod/solr-cluster-data-0          1/1     Running   0          3d2h
+pod/solr-cluster-data-1          1/1     Running   0          3d2h
+pod/solr-cluster-overseer-0      1/1     Running   0          3d2h
+
+NAME                        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
+service/solr-cluster        ClusterIP   10.43.2.22   <none>        8983/TCP   3d2h
+service/solr-cluster-pods   ClusterIP   None         <none>        8983/TCP   3d2h
+
+NAME                                              TYPE              VERSION   AGE
+appbinding.appcatalog.appscode.com/solr-cluster   kubedb.com/solr   9.4.1     3d2h
+
+NAME                                     TYPE                       DATA   AGE
+secret/solr-cluster-admin-cred           kubernetes.io/basic-auth   2      10d
+secret/solr-cluster-auth-config          Opaque                     1      10d
+secret/solr-cluster-config               Opaque                     1      3d2h
+secret/solr-cluster-zk-digest            kubernetes.io/basic-auth   2      10d
+secret/solr-cluster-zk-digest-readonly   kubernetes.io/basic-auth   2      10d
+
+NAME                                                            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
+persistentvolumeclaim/solr-cluster-data-solr-cluster-coordinator-0   Bound   pvc-66f8d7f3-dd6e-4347-8b06-8c4a598c096b   1Gi   RWO   standard   <unset>   3d2h
+persistentvolumeclaim/solr-cluster-data-solr-cluster-data-0          Bound   pvc-6c7c1f9d-68cd-4ed6-b151-6d1b88dccbe0   1Gi   RWO   standard   <unset>   3d2h
+persistentvolumeclaim/solr-cluster-data-solr-cluster-data-1          Bound   pvc-6c7d1f9d-68cd-4ed6-b151-6d1b88dccbe0   1Gi   RWO   standard   <unset>   3d2h
+persistentvolumeclaim/solr-cluster-data-solr-cluster-overseer-0      Bound   pvc-106da684-7414-44a7-97e1-f13b65834c36   1Gi   RWO   standard   <unset>   3d2h
+```
+
+- `PetSet` - 3 PetSets are created for the 3 types of Solr nodes. The PetSets are named after the Solr instance with the given suffix: `{Solr-Name}-{Suffix}`.
+- `Services` - 2 services are generated for each Solr database.
+  - `{Solr-Name}` - the client service which is used to connect to the database. It points to the `overseer` nodes.
+  - `{Solr-Name}-pods` - the node discovery service which is used by the Solr nodes to communicate with each other. It is a headless service.
+- `AppBinding` - an [AppBinding](/docs/guides/solr/concepts/appbinding.md) which holds the connection information for the database. It is also named after the Solr instance.
+- `Secrets` - several secrets are generated for each Solr database.
+  - `{Solr-Name}-admin-cred` - the auth secret which holds the `username` and `password` of the Solr admin user.
+  - `{Solr-Name}-config` - the default configuration secret created by the operator.
+
+## Connect with Solr Database
+
+We will use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to connect with our Solr database. Then we will use `curl` to send `HTTP` requests to check cluster health and verify that our Solr database is working well.
+
+#### Port-forward the Service
+
+KubeDB will create a few Services to connect with the database. Let’s check the Services by the following command,
+
+```bash
+$ kubectl get svc -n demo -l 'app.kubernetes.io/instance=solr-cluster'
+NAME                TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
+solr-cluster        ClusterIP   10.43.2.22   <none>        8983/TCP   3d2h
+solr-cluster-pods   ClusterIP   None         <none>        8983/TCP   3d2h
+```
+Here, we are going to use the `solr-cluster` Service to connect with the database.
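+
+Before port-forwarding, you can optionally confirm that the client Service is actually backed by the Solr pods; this is just a sanity check using the Service name from the listing above:
+
+```bash
+# The listed addresses should correspond to the Solr pods on port 8983
+$ kubectl get endpoints -n demo solr-cluster
+```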
+
+Now, let’s port-forward the `solr-cluster` Service to port `8983` on the local machine:
+
+```bash
+$ kubectl port-forward -n demo svc/solr-cluster 8983
+Forwarding from 127.0.0.1:8983 -> 8983
+```
+Now, our Solr cluster is accessible at `localhost:8983`.
+
+#### Export the Credentials
+
+KubeDB also creates some Secrets for the database. Let’s check which Secrets have been created by KubeDB for our `solr-cluster`.
+
+```bash
+$ kubectl get secret -n demo
+NAME                              TYPE                       DATA   AGE
+solr-cluster-admin-cred           kubernetes.io/basic-auth   2      10d
+solr-cluster-auth-config          Opaque                     1      10d
+solr-cluster-config               Opaque                     1      3d2h
+solr-cluster-zk-digest            kubernetes.io/basic-auth   2      10d
+solr-cluster-zk-digest-readonly   kubernetes.io/basic-auth   2      10d
+```
+Now, we can connect to the database with `solr-cluster-admin-cred` which contains the admin-level credentials to connect with the database.
+
+### Accessing Database Through CLI
+
+To access the database through the CLI, we have to get the credentials. Let’s retrieve them from the secret:
+
+```bash
+$ kubectl get secret -n demo solr-cluster-admin-cred -o jsonpath='{.data.username}' | base64 -d
+admin
+$ kubectl get secret -n demo solr-cluster-admin-cred -o jsonpath='{.data.password}' | base64 -d
+7eONFVgU9BS50eiB
+```
+
+Now, let's check the health of our Solr cluster:
+
+```bash
+# curl -XGET -k -u 'username:password' "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS"
+$ curl -XGET -k --user "admin:7eONFVgU9BS50eiB" "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS"
+{
+  "responseHeader":{
+    "status":0,
+    "QTime":1
+  },
+  "cluster":{
+    "collections":{
+      "kubedb-system":{
+        "pullReplicas":"0",
+        "configName":"kubedb-system.AUTOCREATED",
+        "replicationFactor":1,
+        "router":{
+          "name":"compositeId"
+        },
+        "nrtReplicas":1,
+        "tlogReplicas":"0",
+        "shards":{
+          "shard1":{
+            "range":"80000000-7fffffff",
+            "state":"active",
+            "replicas":{
+              "core_node2":{
+                "core":"kubedb-system_shard1_replica_n1",
+                "node_name":"solr-cluster-data-0.solr-cluster-pods.demo:8983_solr",
+                "type":"NRT",
+                "state":"active",
+                "leader":"true",
+                "force_set_state":"false",
+                "base_url":"http://solr-cluster-data-0.solr-cluster-pods.demo:8983/solr"
+              }
+            },
+            "health":"GREEN"
+          }
+        },
+        "health":"GREEN",
+        "znodeVersion":14
+      }
+    },
+    "properties":{
+      "urlScheme":"http"
+    },
+    "live_nodes":["solr-cluster-data-0.solr-cluster-pods.demo:8983_solr","solr-cluster-overseer-0.solr-cluster-pods.demo:8983_solr","solr-cluster-coordinator-0.solr-cluster-pods.demo:8983_solr"]
+  }
+}
+
+```
+
+## Insert Sample Data
+
+Now, we are going to create a collection in Solr to hold some sample data.
+
+```bash
+$ curl -XPOST -k -u "admin:7eONFVgU9BS50eiB" "http://localhost:8983/solr/admin/collections?action=CREATE&name=book&numShards=2&replicationFactor=2&wt=xml"
+
+0
+1721
+
+0
+272
+book_shard1_replica_n5
+
+0
+273
+book_shard2_replica_n6
+
+0
+1145
+book_shard2_replica_n1
+
+0
+1150
+book_shard1_replica_n2
+
+```
+Now, let’s verify that the collection has been created successfully.
+ +```bash +$ curl -XGET -k --user "admin:7eONFVgU9BS50eiB" "http://localhost:8983/solr/admin/collections?action=LIST" +{ + "responseHeader":{ + "status":0, + "QTime":2 + }, + "collections":["book","kubedb-system"] +} +``` +Also, let’s verify the data in the indexes: + +```bash +$ curl -X POST -u "admin:7eONFVgU9BS50eiB" http://localhost:8983/solr/book/select -H 'Content-Type: application/json' -d ' + { + "query": "*:*", + "limit": 10, + }' +{ + "responseHeader":{ + "zkConnected":true, + "status":0, + "QTime":1, + "params":{ + "json":"\n {\n \"query\": \"*:*\",\n \"limit\": 10,\n }\n ", + "_forwardedCount":"1" + } + }, + "response":{ + "numFound":1, + "start":0, + "numFoundExact":true, + "docs":[{ + "id":"1", + "db":["elasticsearch"], + "_version_":1814163798543564800 + }] + } +} + +``` + + +## Cleaning Up + +To cleanup the k8s resources created by this tutorial, run: + +```bash +$ kubectl patch -n demo solr solr-cluster -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" + +$ kubectl delete Solr -n demo solr-cluster + +# Delete namespace +$ kubectl delete namespace demo +``` + +## Next Steps + +- Monitor your Solr database with KubeDB using [`out-of-the-box` builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md). +- Monitor your Solr database with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). \ No newline at end of file diff --git a/docs/guides/solr/clustering/yamls/combined-multinode.yaml b/docs/guides/solr/clustering/yamls/combined-multinode.yaml new file mode 100644 index 0000000000..cccef9b13e --- /dev/null +++ b/docs/guides/solr/clustering/yamls/combined-multinode.yaml @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + deletionPolicy: DoNotTerminate + replicas: 2 + enableSSL: true + zookeeperRef: + name: zoo-com + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/guides/solr/clustering/yamls/combined-standalone.yaml b/docs/guides/solr/clustering/yamls/combined-standalone.yaml new file mode 100644 index 0000000000..8df2ab12d4 --- /dev/null +++ b/docs/guides/solr/clustering/yamls/combined-standalone.yaml @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.6.1 + deletionPolicy: DoNotTerminate + replicas: 1 + enableSSL: true + zookeeperRef: + name: zoo-com + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/guides/solr/clustering/yamls/topology.yaml b/docs/guides/solr/clustering/yamls/topology.yaml new file mode 100644 index 0000000000..8e9dd82901 --- /dev/null +++ b/docs/guides/solr/clustering/yamls/topology.yaml @@ -0,0 +1,39 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + enableSSL: true + deletionPolicy: DoNotTerminate + version: 9.6.1 + zookeeperRef: + name: zoo-com + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + data: + replicas: 2 + storage: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard \ No newline at end of file diff --git a/docs/guides/solr/concepts/autoscaler.md b/docs/guides/solr/concepts/autoscaler.md new file mode 100644 index 0000000000..6b9a12ebf7 --- /dev/null +++ b/docs/guides/solr/concepts/autoscaler.md @@ -0,0 +1,161 @@ +--- +title: AppBinding CRD +menu: + docs_{{ .version }}: + identifier: sl-solrautoscaler-solr + name: Autoscaler + parent: sl-concepts-solr + weight: 50 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# SolrAutoscaler + +## What is SolrAutoscaler + +`SolrAutoscaler` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for autoscaling [Solr](https://solr.apache.org/guide/solr/latest/index.html) compute resources and storage of database components in a Kubernetes native way. + +## SolrAutoscaler CRD Specifications + +Like any official Kubernetes resource, a `SolrAutoscaler` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +Here, some sample `SolrAutoscaler` CROs for autoscaling different components of database is given below: + +**Sample `SolrAutoscaler` YAML for an Solr combined cluster:** + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-as + namespace: demo +spec: + databaseRef: + name: solr-combined + opsRequestOptions: + timeout: 3m + apply: IfReady + compute: + node: + trigger: "On" + podLifeTimeThreshold: 24h + minAllowed: + cpu: 1 + memory: 2Gi + maxAllowed: + cpu: 2 + memory: 3Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" + resourceDiffPercentage: 10 + storage: + node: + expansionMode: "Online" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 50 +``` + +**Sample `SolrAutoscaler` YAML for the Solr topology cluster:** + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: SolrAutoscaler +metadata: + name: sl-as-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + compute: + overseer: + trigger: "On" + podLifeTimeThreshold: 24h + minAllowed: + cpu: 250m + memory: 350Mi + maxAllowed: + cpu: 1 + memory: 1Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" + resourceDiffPercentage: 10 + data: + trigger: "On" + podLifeTimeThreshold: 24h + minAllowed: + cpu: 250m + memory: 350Mi + maxAllowed: + cpu: 1 + memory: 1Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" + resourceDiffPercentage: 10 + coordinator: + trigger: "On" + podLifeTimeThreshold: 24h + minAllowed: + cpu: 250m + memory: 350Mi + maxAllowed: + cpu: 1 + memory: 1Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" + resourceDiffPercentage: 10 + storage: + data: + expansionMode: "Online" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 50 +``` + +Here, we are going to describe the various sections of a `SolrAutoscaler` crd. + +A `SolrAutoscaler` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a `required` field that point to the [Solr](/docs/guides/solr/concepts/solr.md) object for which the autoscaling will be performed. 
This field consists of the following sub-field:
+
+- **spec.databaseRef.name:** specifies the name of the [Solr](/docs/guides/solr/concepts/solr.md) object.
+
+### spec.compute
+
+`spec.compute` specifies the autoscaling configuration for the compute resources, i.e. cpu and memory, of the database components. This field consists of the following sub-fields:
+
+- `spec.compute.node` indicates the desired compute autoscaling configuration for a combined Solr cluster.
+- `spec.compute.overseer` indicates the desired compute autoscaling configuration for overseer nodes.
+- `spec.compute.data` indicates the desired compute autoscaling configuration for data nodes.
+- `spec.compute.coordinator` indicates the desired compute autoscaling configuration for coordinator nodes.
+
+All of them have the following sub-fields:
+
+- `trigger` indicates if compute autoscaling is enabled for this component of the database. If "On" then compute autoscaling is enabled. If "Off" then compute autoscaling is disabled.
+- `minAllowed` specifies the minimal amount of resources that will be recommended; the default is no minimum.
+- `maxAllowed` specifies the maximum amount of resources that will be recommended; the default is no maximum.
+- `controlledResources` specifies which types of compute resources (cpu and memory) are allowed for autoscaling. Allowed values are "cpu" and "memory".
+- `containerControlledValues` specifies which resource values should be controlled. Allowed values are "RequestsAndLimits" and "RequestsOnly".
+- `resourceDiffPercentage` specifies the minimum resource difference between the recommended value and the current value, in percentage. If the difference percentage is greater than this value, then autoscaling will be triggered.
+- `podLifeTimeThreshold` specifies the minimum pod lifetime of at least one of the pods before triggering autoscaling.
+
+### spec.storage
+
+`spec.storage` specifies the autoscaling configuration for the storage resources of the database components. This field consists of the following sub-fields:
+
+- `spec.storage.node` indicates the desired storage autoscaling configuration for a combined Solr cluster.
+- `spec.storage.topology` indicates the desired storage autoscaling configuration for the different types of nodes running in the Solr topology cluster mode.
+- `spec.storage.overseer` indicates the desired storage autoscaling configuration for the overseer nodes.
+- `spec.storage.data` indicates the desired storage autoscaling configuration for the data nodes.
+- `spec.storage.coordinator` indicates the desired storage autoscaling configuration for the coordinator nodes.
+
+All of them have the following sub-fields:
+
+- `trigger` indicates if storage autoscaling is enabled for this component of the database. If "On" then storage autoscaling is enabled. If "Off" then storage autoscaling is disabled.
+- `usageThreshold` indicates the usage percentage threshold; if the current storage usage exceeds this threshold, then storage autoscaling will be triggered.
+- `scalingThreshold` indicates the percentage of the current storage that will be scaled.
diff --git a/docs/guides/solr/concepts/solr.md b/docs/guides/solr/concepts/solr.md
index 0654789ecf..9c27a74866 100644
--- a/docs/guides/solr/concepts/solr.md
+++ b/docs/guides/solr/concepts/solr.md
@@ -154,6 +154,121 @@ type: Opaque
 
 Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher).
+### spec.topology
+
+`spec.topology` is an `optional` field that provides a way to configure different types of nodes for the Solr cluster. This field enables you to specify how many nodes you want to act as `overseer`, `data`, `coordinator` or other node roles for Solr. You can also specify how much storage and resources to allocate for each type of node independently.
+
+Currently supported node types are -
+- **data**: Data nodes hold the shards that contain the documents you have indexed. Data nodes handle data-related operations like CRUD, search, and aggregations.
+- **overseer**: Overseer nodes execute cluster administrative tasks such as shard distribution, and are composed of one or more overseer processors.
+- **coordinator**: The coordinator node can act as if it has replicas of all collections in the cluster when a query is performed.
+
+```yaml
+  topology:
+    data:
+      replicas: 3
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+      suffix: data
+    overseer:
+      replicas: 3
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+      suffix: overseer
+    coordinator:
+      replicas: 2
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+      suffix: coordinator
+```
+
+The `spec.topology` contains the following fields:
+
+- `topology.overseer`:
+  - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods) that act as the `overseer` nodes. Defaults to `1`.
+  - `suffix` (`: "overseer"`) - is an `optional` field that is added as the suffix of the overseer PetSet name. Defaults to `overseer`.
+  - `storage` is a `required` field that specifies how much storage to claim for each of the `overseer` nodes.
+  - `resources` (`: "cpu: 900m, memory: 2Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `overseer` nodes.
+
+- `topology.data`:
+  - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods) that act as the `data` nodes. Defaults to `1`.
+  - `suffix` (`: "data"`) - is an `optional` field that is added as the suffix of the data PetSet name. Defaults to `data`.
+  - `storage` is a `required` field that specifies how much storage to claim for each of the `data` nodes.
+  - `resources` (` cpu: 900m, memory: 2Gi `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `data` nodes.
+
+- `topology.coordinator`:
+  - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods) that act as the `coordinator` nodes. Defaults to `1`.
+  - `suffix` (`: "coordinator"`) - is an `optional` field that is added as the suffix of the coordinator PetSet name. Defaults to `coordinator`.
+  - `storage` is a `required` field that specifies how much storage to claim for each of the `coordinator` nodes.
+  - `resources` (` cpu: 900m, memory: 2Gi `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `coordinator` nodes.
+
+> Note: Any two types of nodes can't have the same `suffix`.
+
+If you specify the `spec.topology` field, then you **do not need** to specify the following fields in the Solr CRD.
+
+- `spec.replicas`
+- `spec.storage`
+- `spec.podTemplate.spec.resources`
+
+If you do not specify the `spec.topology` field, the Solr cluster runs in combined mode.
+
+> Combined Mode: all nodes of the Solr cluster will work as `overseer`, `data` and `coordinator` nodes simultaneously.
+
+### spec.replicas
+
+`spec.replicas` is an `optional` field that can be used if `spec.topology` is not specified. This field specifies the number of nodes (ie. pods) in the Solr cluster. The default value of this field is `1`.
+
+```yaml
+spec:
+  replicas: 3
+```
+
+
 ### spec.zookeeperRef
 
@@ -264,6 +379,45 @@ KubeDB allows following fields to set in `spec.serviceTemplates`:
 
 See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in detail.
 
+### spec.tls
+
+> The ReconfigureTLS only works with the [Cert-Manager](https://cert-manager.io/docs/concepts/) managed certificates. [Installation guide](https://cert-manager.io/docs/installation/).
+
+`spec.tls` is an `optional` field. It specifies the necessary information required to add or remove or update the TLS configuration of the Solr cluster. It consists of the following sub-fields:
+
+- `tls.remove` ( `bool` | `false` ) - tells the operator to remove the TLS configuration for the HTTP layer. The transport layer is always secured with certificates, so the removal process does not affect the transport layer.
+- `tls.rotateCertificates` ( `bool` | `false`) - tells the operator to renew all the certificates.
+- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Solr. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA.
+  - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`.
+  - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`.
+  - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced.
+
+- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields:
+  - `alias` - represents the identifier of the certificate. It has the following possible value:
+    - `server` - is used for the server certificate configuration.
+    - `client` - is used for the client certificate configuration.
+
+  - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates.
+
+  - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields:
+    - `organizations` ( `[]string` | `nil` ) - is a list of organization names.
+    - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names.
+ - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + ### spec.deletionPolicy `deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Solr` crd or which resources KubeDB should keep or delete when you delete `Solr` crd. KubeDB provides following four deletion policies: diff --git a/docs/guides/solr/concepts/solropsrequests.md b/docs/guides/solr/concepts/solropsrequests.md new file mode 100644 index 0000000000..506c7fd8ad --- /dev/null +++ b/docs/guides/solr/concepts/solropsrequests.md @@ -0,0 +1,516 @@ +--- +title: AppBinding CRD +menu: + docs_{{ .version }}: + identifier: sl-solropsrequest-solr + name: SolrOpsRequest + parent: sl-concepts-solr + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# SolrOpsRequest + +## What is SolrOpsRequest + +`SolrOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for the [Solr](https://solr.apache.org/guide/solr/latest/index.html) administrative operations like database version update, horizontal scaling, vertical scaling, etc. in a Kubernetes native way. + +## SolrOpsRequest Specifications + +Like any official Kubernetes resource, a `SolrOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: upgrade-solr + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: UpdateVersion + updateVersion: + targetVersion: 9.6.1 +status: + conditions: + - lastTransitionTime: "2024-10-25T06:40:49Z" + message: Successfully updated Solr version + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Here, we are going to describe the various sections of a `SolrOpsRequest` CRD. + +### spec.type + +`spec.type` is a `required` field that specifies the kind of operation that will be applied to the Solr. The following types of operations are allowed in the `SolrOpsRequest`: + +- `Restart` - is used to perform a smart restart of the Solr cluster. +- `UpdateVersion` - is used to update the version of the Solr in a managed way. The necessary information required for updating the version, must be provided in `spec.updateVersion` field. 
+- `VerticalScaling` - is used to vertically scale the Solr nodes (ie. pods). The necessary information required for vertical scaling, must be provided in `spec.verticalScaling` field.
+- `HorizontalScaling` - is used to horizontally scale the Solr nodes (ie. pods). The necessary information required for horizontal scaling, must be provided in `spec.horizontalScaling` field.
+- `VolumeExpansion` - is used to expand the storage of the Solr nodes (ie. pods). The necessary information required for volume expansion, must be provided in `spec.volumeExpansion` field.
+- `ReconfigureTLS` - is used to configure the TLS configuration of a running Solr cluster. The necessary information required for reconfiguring the TLS, must be provided in `spec.tls` field.
+
+> Note: You can only perform one type of operation by using a `SolrOpsRequest` custom resource object. For example, if you want to update your database and scale up its replicas, then you will need to create two separate `SolrOpsRequest` objects. At first, you will have to create a `SolrOpsRequest` for updating. Once the update is completed, then you can create another `SolrOpsRequest` for scaling. You should not create two `SolrOpsRequest` objects simultaneously.
+
+### spec.databaseRef
+
+`spec.databaseRef` is a `required` field that points to the [Solr](/docs/guides/solr/concepts/solr.md) object for which the administrative operations will be performed. This field consists of the following sub-field:
+
+- `databaseRef.name` - specifies the name of the [Solr](/docs/guides/solr/concepts/solr.md) object.
+
+> Note: The `SolrOpsRequest` should be on the same namespace as the referring `Solr` object.
+
+### spec.updateVersion
+
+`spec.updateVersion` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `UpdateVersion`.
+It specifies the desired version information required for the Solr version update. This field consists of the following sub-fields:
+
+- `updateVersion.targetVersion` refers to a [SolrVersion](/docs/guides/solr/concepts/solrversion.md) CR name that contains the Solr version information required to perform the update.
+
+> KubeDB does not support downgrade for Solr.
+
+**Samples:**
+Let's assume we have a Solr cluster of version `9.4.1`. The Solr custom resource is named `solr-cluster` and it's provisioned in the `demo` namespace. Now, you want to update your Solr cluster to `9.6.1`. Apply this YAML to update to your desired version.
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: SolrOpsRequest
+metadata:
+  name: upgrade-solr
+  namespace: demo
+spec:
+  databaseRef:
+    name: solr-cluster
+  type: UpdateVersion
+  updateVersion:
+    targetVersion: 9.6.1
+```
+
+### spec.horizontalScaling
+
+`spec.horizontalScaling` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `HorizontalScaling`.
+It specifies the necessary information required to horizontally scale the Solr nodes (ie. pods). It consists of the following sub-fields:
+
+- `horizontalScaling.node` - specifies the desired number of nodes for the Solr cluster running in combined mode (ie. `solr.spec.topology` is `empty`). The value should be greater than the maximum value of replication for the shard of any index. For example, if a shard has `x` replicas, `x+1` data nodes are required to allocate them.
+- `horizontalScaling.overseer` - specifies the desired number of overseer nodes. The value should be greater than zero ( >= 1 ).
+- `horizontalScaling.data` - specifies the desired number of data nodes.
The value should be greater than zero ( >= 1 ). +- `horizontalScaling.coordinator` - specifies the desired number of coordinator nodes. ( >= 1) + +**Samples:** + +- Horizontally scale combined nodes: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: hscale-solr-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + type: HorizontalScaling + horizontalScaling: + node: 2 + ``` + +- Horizontally scale cluster topology: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: hscale-solr-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: HorizontalScaling + horizontalScaling: + coordinator: 2 + data: 2 + overseer: 2 +``` + +- Horizontally scale only data nodes: + + ```yaml + apiVersion: ops.kubedb.com/v1alpha1 + kind: SolrOpsRequest + metadata: + name: hscale-data-nodes + namespace: demo + spec: + type: HorizontalScaling + databaseRef: + name: solr-cluster + horizontalScaling: + data: 4 + ``` + +### spec.verticalScaling + +`spec.verticalScaling` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `VerticalScaling`. It specifies the necessary information required to vertically scale the Solr node resources (ie. `cpu`, `memory`). It consists of the following sub-field: + +- `verticalScaling.node` - specifies the desired node resources for the Solr cluster running in combined mode (ie. `solr.spec.topology` is `empty`). +- `verticalScaling.overseer` - specifies the desired resources for the overseer nodes. It takes input same as the k8s [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types). +- `verticalScaling.data` - specifies the desired node resources for the data nodes. It takes input same as the k8s [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types). +- `verticalScaling.coordinator` - specifies the desired node resources for the coordinator nodes. It takes input same as the k8s [resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types). + +> Note: It is recommended not to use resources below the default one; `cpu: 900m, memory: 2Gi`. 
+ +**Samples:** + +- Vertically scale combined nodes: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: vertical-scale-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +- For topology cluster + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: vertical-scale-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + data: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + overseer: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + coordinator: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +- Vertically scale only data nodes: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: vertical-scale-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + data: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +### spec.volumeExpansion + +> Note: To use the volume expansion feature the StorageClass must support volume expansion. + +`spec.volumeExpansion` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `VolumeExpansion`. It specifies the necessary information required to expand the storage of the Solr node. It consists of the following sub-field: + +- `volumeExpansion.node` - specifies the desired size of the persistent volume for the Solr node running in combined mode (ie. `solr.spec.topology` is `empty`). +- `volumeExpansion.overseer` - specifies the desired size of the persistent volume for the overseer nodes. +- `volumeExpansion.data` - specifies the desired size of the persistent volume for the data nodes. +- `volumeExpansion.coordinator` - specifies the desired size of the persistent volume for the ingest nodes. + +All of them refer to [Quantity](https://v1-22.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#quantity-resource-core) types of Kubernetes. + +> Note: Make sure that the requested volume is greater than the current volume. + +**Samples:** + +- Expand volume for combined nodes: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: volume-expansion-topology + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + node: 4Gi + ``` + +- Expand volume for cluster topology: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: volume-expansion-topology + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + data: 4Gi + overseer : 4Gi + coordinator: 4Gi + ``` + +- Expand volume for only data nodes: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: volume-expansion-topology + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + data: 4Gi + ``` + +### spec.tls + +> The ReconfigureTLS only works with the [Cert-Manager](https://cert-manager.io/docs/concepts/) managed certificates. 
[Installation guide](https://cert-manager.io/docs/installation/). + +`spec.tls` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `ReconfigureTLS`. It specifies the necessary information required to add or remove or update the TLS configuration of the Solr cluster. It consists of the following sub-fields: + +- `tls.remove` ( `bool` | `false` ) - tells the operator to remove the TLS configuration for the HTTP layer. The transport layer is always secured with certificates, so the removal process does not affect the transport layer. +- `tls.rotateCertificates` ( `bool` | `false`) - tells the operator to renew all the certificates. +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Solr. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. 
+ +To enable TLS on the HTTP layer, the configuration for the `http` layer certificate needs to be provided on `tls.certificates[]` list. + +**Samples:** + +- Add TLS: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: add-tls + namespace: demo +spec: + apply: IfReady + tls: + issuerRef: + apiGroup: cert-manager.io + name: self-signed-issuer + kind: ClusterIssuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + databaseRef: + name: solr-cluster + type: ReconfigureTLS + ``` + +- Remove TLS: + + ```yaml + apiVersion: ops.kubedb.com/v1alpha1 + kind: SolrOpsRequest + metadata: + name: remove-tls + namespace: demo + spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + remove: true + ``` + +- Rotate TLS: + + ```yaml + apiVersion: ops.kubedb.com/v1alpha1 + kind: SolrOpsRequest + metadata: + name: rotate-tls + namespace: demo + spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + rotateCertificates: true + ``` + +### spec.configuration + +If you want to reconfigure your Running Solr cluster or different components of it with new custom configuration, you have to specify `spec.configuration` section. This field consists of the following sub-field: + +- `spec.configuration.configsecret`: ConfigSecret is an optional field to provide custom configuration file for database. +- `spec.configuration.applyConfig`: ApplyConfig is an optional field to provide Solr configuration. Provided configuration will be applied to config files stored in ConfigSecret. If the ConfigSecret is missing, the operator will create a new k8s secret by the following naming convention: {db-name}-user-config. +```yaml + applyConfig: + solr.xml: | + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + +``` + +- `spec.configuration.removeCustomConfig`: If set to "true", the user provided configuration will be removed. The Solr cluster will start will default configuration that is generated by the operator. + +### spec.timeout +As we internally retry the ops request steps multiple times, This `timeout` field helps the users to specify the timeout for those steps of the ops request (in second). +If a step doesn't finish within the specified timeout, the ops request will result in failure. + +### spec.apply +This field controls the execution of obsRequest depending on the database state. It has two supported values: `Always` & `IfReady`. +Use IfReady, if you want to process the opsRequest only when the database is Ready. And use Always, if you want to process the execution of opsReq irrespective of the Database state. + +## SolrOpsRequest `Status` + +`.status` describes the current state and progress of a `SolrOpsRequest` operation. It has the following fields: + +### status.phase + +`status.phase` indicates the overall phase of the operation for this `SolrOpsRequest`. It can have the following three values: + +| Phase | Meaning | +| :--------: | ---------------------------------------------------------------------------------- | +| Progressing | KubeDB has started to process the Ops request | +| Successful | KubeDB has successfully performed all the operations needed for the Ops request | +| Failed | KubeDB has failed while performing the operations needed for the Ops request | + +### status.observedGeneration + +`status.observedGeneration` shows the most recent generation observed by the `SolrOpsRequest` controller. 
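+
+For example, assuming the ops request can be addressed by its kind name (`solropsrequest`), the overall `status` of the sample ops request above can be inspected with standard `kubectl` commands:
+
+```bash
+# Overall phase: Progressing | Successful | Failed
+$ kubectl get solropsrequest -n demo upgrade-solr -o jsonpath='{.status.phase}'
+
+# Full status, including the conditions described below
+$ kubectl describe solropsrequest -n demo upgrade-solr
+```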
+ +### status.conditions + +`status.conditions` is an array that specifies the conditions of different steps of `SolrOpsRequest` processing. Each condition entry has the following fields: + +- `types` specifies the type of the condition. +- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. + - `status` will be `True` if the current transition succeeded. + - `status` will be `False` if the current transition failed. + - `status` will be `Unknown` if the current transition was denied. +- The `message` field is a human-readable message indicating details about the condition. +- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. +- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. +- The `observedGeneration` shows the most recent condition transition generation observed by the controller. + +SolrOpsRequest has the following types of conditions: + +| Type | Meaning | +|-----------------------------|-----------------------------------------------------| +| `Progressing` | The operator has started to process the Ops request | +| `Successful` | The Ops request has successfully executed | +| `Failed` | The operation on the database failed | +| `OrphanPetSetPods` | The petSet has deleted leaving the pods orphaned | +| `ReadyPetSets` | The PetSet are ready | +| `ScaleDownCombinedNode` | Scaled down the combined nodes | +| `ScaleDownDataNode` | Scaled down the data nodes | +| `ScaleDownCoordinatorNode` | Scaled down the coordinator nodes | +| `ScaleDownOverseerNode` | Scaled down the overseer nodes | +| `ScaleUpCombinedNode` | Scaled up the combined nodes | +| `ScaleUpDataNode` | Scaled up the data nodes | +| `ScaleUpCoordinatorNode` | Scaled up the coordinator nodes | +| `ScaleUpOverseerNode` | Scaled up the overseer nodes | +| `UpdateCombinedNodePVCs` | Updated combined node PVCs | +| `UpdateDataNodePVCs` | Updated data node PVCs | +| `UpdateCoordinatorNodePVCs` | Updated coordinator node PVCs | +| `UpdateOverseerNodePVCs` | Updated overseer node PVCs | +| `UpdateNodeResources` | Updated node resources | diff --git a/docs/guides/solr/configuration/_index.md b/docs/guides/solr/configuration/_index.md new file mode 100644 index 0000000000..43ddec3519 --- /dev/null +++ b/docs/guides/solr/configuration/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Custom Configuration +menu: + docs_{{ .version }}: + identifier: sl-custom-config + name: Custom Configuration + parent: sl-solr-guides + weight: 18 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/configuration/config-file.md b/docs/guides/solr/configuration/config-file.md new file mode 100644 index 0000000000..98f90de09a --- /dev/null +++ b/docs/guides/solr/configuration/config-file.md @@ -0,0 +1,180 @@ +--- +title: Custom Configuration With Config Files +menu: + docs_{{ .version }}: + identifier: sl-custom-config-file + name: Config Files + parent: sl-custom-config + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Solr Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Solr cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. 
+ +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Combined](/docs/guides/solr/clustering/combined_cluster.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/Solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Solr` cluster using a supported version by `KubeDB` operator. Then we are going to apply `SolrOpsRequest` to reconfigure its configuration. + +### Prepare Solr Cluster + +Now, we are going to deploy a `Solr` cluster with version `9.6.1`. + +### Deploy Solr + +At first, we will create a secret with the `solr.xml` attribute containing required configuration settings. + +**server.properties:** + +```properties +${solr.max.booleanClauses:2024} +``` +Here, `maxBooleanClauses` is set to `2024`, whereas the default value is `1024`. + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/configuration/sl-custom-config.yaml +secret/sl-custom-config created +``` + +In this section, we are going to create a Solr object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr + namespace: demo +spec: + configSecret: + name: sl-custom-config + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Solr/configuration/solr.yaml +solr.kubedb.com/solr created +``` + +Now, wait until `solr` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr kubedb.com/v1alpha2 9.6.1 Ready 10m +``` + +Now, we will check if the Solr has started with the custom configuration we have provided. 
+ +Exec into the Solr pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo solr-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-0:/opt/solr-9.6.1$ cat /var/solr/solr.xml + + + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + /var/solr/data + ${solr.sharedLib:},/opt/solr/contrib/gcs-repository/lib,/opt/solr/contrib/prometheus-exporter/lib,/opt/solr/contrib/s3-repository/lib,/opt/solr/dist + ${solr.allowPaths:} + ${solr.max.booleanClauses:2024} + + ${connTimeout:60000} + ${socketTimeout:600000} + + + ${distribUpdateConnTimeout:60000} + ${distribUpdateSoTimeout:600000} + ${genericCoreNodeNames:true} + ${host:} + ${hostContext:solr} + ${solr.port.advertise:80} + ${zkACLProvider:org.apache.solr.common.cloud.DigestZkACLProvider} + ${zkClientTimeout:30000} + ${zkCredentialsInjector:org.apache.solr.common.cloud.VMParamsZkCredentialsInjector} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DigestZkCredentialsProvider} + + + + +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solr -n demo solr-combined +kubectl delete secret -n demo sl-custom-config +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). \ No newline at end of file diff --git a/docs/guides/solr/configuration/custom-pod-template.md b/docs/guides/solr/configuration/custom-pod-template.md new file mode 100644 index 0000000000..fdbd1f7335 --- /dev/null +++ b/docs/guides/solr/configuration/custom-pod-template.md @@ -0,0 +1,675 @@ +--- +title: Run Solr with Custom PodTemplate +menu: + docs_{{ .version }}: + identifier: sl-custom-pod-template + name: Customize PodTemplate + parent: sl-custom-config + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Solr with Custom PodTemplate + +KubeDB supports providing custom configuration for Solr via [PodTemplate](/docs/guides/solr/concepts/solr.md#spectopology). This tutorial will show you how to use KubeDB to run a Solr database with custom configuration using PodTemplate. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+
+  ```bash
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: YAML files used in this tutorial are stored in [docs/guides/solr/configuration/podtemplating/yamls](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/solr/configuration/podtemplating/yamls) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Overview
+
+KubeDB allows providing a template for each type of Solr pod through `spec.podTemplate`, `spec.topology.data.podTemplate`, `spec.topology.overseer.podTemplate`, and `spec.topology.coordinator.podTemplate`. The KubeDB operator will pass the information provided in these podTemplate fields to the corresponding `data`, `overseer`, and `coordinator` PetSets created for the Solr database.
+
+KubeDB accepts the following fields to set in `spec.podTemplate`:
+
+- metadata:
+  - annotations (pod's annotation)
+  - labels (pod's labels)
+- controller:
+  - annotations (petset's annotation)
+  - labels (petset's labels)
+- spec:
+  - volumes
+  - initContainers
+  - containers
+  - imagePullSecrets
+  - nodeSelector
+  - affinity
+  - serviceAccountName
+  - schedulerName
+  - tolerations
+  - priorityClassName
+  - priority
+  - securityContext
+  - livenessProbe
+  - readinessProbe
+  - lifecycle
+
+Read about the fields in detail in the [PodTemplate concept](/docs/guides/solr/concepts/solr.md#spectopology).
+
+
+## CRD Configuration
+
+Below is the YAML for the Solr created in this example.
+
+```yaml
+apiVersion: kubedb.com/v1alpha2
+kind: Solr
+metadata:
+  name: solr-misc-config
+  namespace: demo
+spec:
+  version: "9.6.1"
+  topology:
+    data:
+      replicas: 1
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+    overseer:
+      replicas: 1
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+    coordinator:
+      replicas: 1
+      podTemplate:
+        spec:
+          containers:
+            - name: "solr"
+              resources:
+                requests:
+                  cpu: "900m"
+                limits:
+                  cpu: "900m"
+                  memory: "2.5Gi"
+      storage:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+        storageClassName: standard
+```
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/configuration/sl-custom-podtemplate.yaml
+solr.kubedb.com/solr-misc-config created
+```
+
+Now, wait a few minutes. The KubeDB operator will create the necessary PVCs, PetSets, services, secrets, etc. If everything goes well, we will see that pods with the name prefix `solr-misc-config` have been created.
+
+Check that the petset's pods are running:
+
+```bash
+$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-misc-config
+NAME                             READY   STATUS    RESTARTS   AGE
+solr-misc-config-coordinator-0   1/1     Running   0          3m30s
+solr-misc-config-data-0          1/1     Running   0          3m35s
+solr-misc-config-overseer-0      1/1     Running   0          3m33s
+```
+
+Now, we will check if the database has started with the custom configuration we have provided.
+ +```bash +$ kubectl get pod -n demo solr-misc-config-coordinator-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "900m", + "memory": "2560Mi" + }, + "requests": { + "cpu": "900m", + "memory": "2560Mi" + } +} + +$ kubectl get pod -n demo solr-misc-config-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "900m", + "memory": "2560Mi" + }, + "requests": { + "cpu": "900m", + "memory": "2560Mi" + } +} + +$ kubectl get pod -n demo solr-misc-config-overseer-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "900m", + "memory": "2560Mi" + }, + "requests": { + "cpu": "900m", + "memory": "2560Mi" + } +} + +``` + + +## Using Node Selector + +Here in this example we will use [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) to schedule our Solr pod to a specific node. Applying nodeSelector to the Pod involves several steps. We first need to assign a label to some node that will be later used by the `nodeSelector` . Let’s find what nodes exist in your cluster. To get the name of these nodes, you can run: + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +gke-pritam-default-pool-c682fe6e-59x3 Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-rbtx Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-spdb Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-049h Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-b8p8 Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-vbpc Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-5fv5 Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-5vkv Ready 110m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-p039 Ready 110m v1.30.5-gke.1443001 +``` +As you see, we have nine nodes in the cluster. + +Let’s say we want pods to schedule to nodes with key `topology.gke.io/zone` and value `us-central1-b` +```bash +$ kubectl get nodes -n demo -l topology.gke.io/zone=us-central1-b +NAME STATUS ROLES AGE VERSION +gke-pritam-default-pool-c682fe6e-59x3 Ready 118m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-rbtx Ready 118m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-spdb Ready 118m v1.30.5-gke.1443001 +``` + +As you see, the gke-pritam-default-pool-c682fe6e-59x3 now has a new label topology.gke.io/zone=us-central1-b. 
To see all labels attached to the node, you can also run: +```bash +$ kubectl describe nodes gke-pritam-default-pool-c682fe6e-59x3 +Name: gke-pritam-default-pool-c682fe6e-59x3 +Roles: +Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/instance-type=e2-standard-2 + beta.kubernetes.io/os=linux + cloud.google.com/gke-boot-disk=pd-balanced + cloud.google.com/gke-container-runtime=containerd + cloud.google.com/gke-cpu-scaling-level=2 + cloud.google.com/gke-logging-variant=DEFAULT + cloud.google.com/gke-max-pods-per-node=110 + cloud.google.com/gke-memory-gb-scaling-level=8 + cloud.google.com/gke-nodepool=default-pool + cloud.google.com/gke-os-distribution=cos + cloud.google.com/gke-provisioning=standard + cloud.google.com/gke-stack-type=IPV4 + cloud.google.com/machine-family=e2 + cloud.google.com/private-node=false + disktype=ssd + failure-domain.beta.kubernetes.io/region=us-central1 + failure-domain.beta.kubernetes.io/zone=us-central1-b + kubernetes.io/arch=amd64 + kubernetes.io/hostname=gke-pritam-default-pool-c682fe6e-59x3 + kubernetes.io/os=linux + node.kubernetes.io/instance-type=e2-standard-2 + topology.gke.io/zone=us-central1-b + topology.kubernetes.io/region=us-central1 + topology.kubernetes.io/zone=us-central1-b +``` + +Now let's create a Solr with this new label as nodeSelector. Below is the yaml we are going to apply: +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-custom-nodeselector + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + podTemplate: + spec: + nodeSelector: + topology.gke.io/zone: us-central1-b + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/configuration/sl-custom-nodeselector.yaml +solr.kubedb.com/solr-node-selector created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `sdb-node-selector-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-custom-nodeselector +NAME READY STATUS RESTARTS AGE +solr-custom-nodeselector-0 1/1 Running 0 3m18s +solr-custom-nodeselector-1 1/1 Running 0 2m54s +``` +As we see the pod is running, you can verify that by running `kubectl get pods -n demo sdb-node-selector-0 -o wide` and looking at the “NODE” to which the Pod was assigned. +```bash +$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-custom-nodeselector -owide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +solr-custom-nodeselector-0 1/1 Running 0 3m52s 10.12.7.7 gke-pritam-default-pool-c682fe6e-spdb +solr-custom-nodeselector-1 1/1 Running 0 3m28s 10.12.8.9 gke-pritam-default-pool-c682fe6e-59x3 +``` +We can successfully verify that our pod was scheduled to our desired node. + +## Using Taints and Tolerations + +Here in this example we will use [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to schedule our Solr pod to a specific node and also prevent from scheduling to nodes. Applying taints and tolerations to the Pod involves several steps. Let’s find what nodes exist in your cluster. 
To get the name of these nodes, you can run: + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +gke-pritam-default-pool-c682fe6e-59x3 Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-rbtx Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-c682fe6e-spdb Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-049h Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-b8p8 Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-cc96ce9b-vbpc Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-5fv5 Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-5vkv Ready 123m v1.30.5-gke.1443001 +gke-pritam-default-pool-dadbf4db-p039 Ready 123m v1.30.5-gke.1443001 +``` +As you see, we have nine nodes in the cluster + +Next, we are going to taint these nodes. +```bash +$ kubectl taint nodes gke-pritam-default-pool-c682fe6e-59x3 key1=node1:NoSchedule +node/gke-pritam-default-pool-c682fe6e-59x3 tainted +$ kubectl taint nodes gke-pritam-default-pool-c682fe6e-rbtx key1=node2:NoSchedule +node/gke-pritam-default-pool-c682fe6e-rbtx tainted +$ kubectl taint nodes gke-pritam-default-pool-c682fe6e-spdb key1=node3:NoSchedule +node/gke-pritam-default-pool-c682fe6e-spdb tainted +$ kubectl taint nodes gke-pritam-default-pool-cc96ce9b-049h key1=node4:NoSchedule +node/gke-pritam-default-pool-cc96ce9b-049h tainted +$ kubectl taint nodes gke-pritam-default-pool-cc96ce9b-b8p8 key1=node5:NoSchedule +node/gke-pritam-default-pool-cc96ce9b-b8p8 tainted +$ kubectl taint nodes gke-pritam-default-pool-cc96ce9b-vbpc key1=node6:NoSchedule +node/gke-pritam-default-pool-cc96ce9b-vbpc tainted +$ kubectl taint nodes gke-pritam-default-pool-dadbf4db-5fv5 key1=node7:NoSchedule +node/gke-pritam-default-pool-dadbf4db-5fv5 tainted +$ kubectl taint nodes gke-pritam-default-pool-dadbf4db-5vkv key1=node8:NoSchedule +node/gke-pritam-default-pool-dadbf4db-5vkv tainted +$ kubectl taint nodes gke-pritam-default-pool-dadbf4db-p039 key1=node9:NoSchedule +node/gke-pritam-default-pool-dadbf4db-p039 tainted +``` +Let's see our tainted nodes here, +```bash +$ kubectl get nodes -o json | jq -r '.items[] | select(.spec.taints != null) | .metadata.name, .spec.taints' +gke-pritam-default-pool-c682fe6e-59x3 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node1" + } +] +gke-pritam-default-pool-c682fe6e-rbtx +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node2" + } +] +gke-pritam-default-pool-c682fe6e-spdb +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node3" + } +] +gke-pritam-default-pool-cc96ce9b-049h +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node4" + } +] +gke-pritam-default-pool-cc96ce9b-b8p8 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node5" + } +] +gke-pritam-default-pool-cc96ce9b-vbpc +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node6" + } +] +gke-pritam-default-pool-dadbf4db-5fv5 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node7" + } +] +gke-pritam-default-pool-dadbf4db-5vkv +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node8" + } +] +gke-pritam-default-pool-dadbf4db-p039 +[ + { + "effect": "NoSchedule", + "key": "key1", + "value": "node9" + } +] +``` +We can see that our taints were successfully assigned. Now let's try to create a Solr without proper tolerations. 
Here is the yaml of Solr we are going to createc +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-without-toleration + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/configuration/solr-without-tolerations.yaml +solr.kubedb.com/solr-without-tolerations created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `sdb-without-tolerations-0` has been created and running. + +Check that the petset's pod is running or not, +```bash +$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-without-toleration +NAME READY STATUS RESTARTS AGE +solr-without-toleration-0 0/1 Pending 0 64s +``` +Here we can see that the pod is not running. So let's describe the pod, +```bash +$ kubectl describe pod -n demo solr-without-toleration-0 +Name: solr-without-toleration-0 +Namespace: demo +Priority: 0 +Service Account: default +Node: +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-without-toleration + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com + apps.kubernetes.io/pod-index=0 + controller-revision-hash=solr-without-toleration-7d4b4d4bcc + coordinator=set + data=set + overseer=set + statefulset.kubernetes.io/pod-name=solr-without-toleration-0 +Annotations: cloud.google.com/cluster_autoscaler_unhelpable_since: 2024-11-13T12:32:48+0000 + cloud.google.com/cluster_autoscaler_unhelpable_until: Inf +Status: Pending +IP: +IPs: +Controlled By: PetSet/solr-without-toleration +Init Containers: + init-solr: + Image: ghcr.io/kubedb/solr-init:9.6.1@sha256:dbbee5c25da5666a90fbb5d90d146c3a8d54f04eefacd779b59a248c0972ef15 + Port: + Host Port: + SeccompProfile: RuntimeDefault + Limits: + memory: 512Mi + Requests: + cpu: 200m + memory: 512Mi + Environment: + SOLR_JAVA_MEM: -Xms1g -Xmx3g + SOLR_HOME: /var/solr + SOLR_PORT: 8983 + SOLR_NODE_PORT: 8983 + SOLR_logS_DIR: /var/solr/logs + SOLR_log_LEVEL: DEBUG + SOLR_PORT_ADVERTISE: 8983 + CLUSTER_NAME: solr-without-toleration + POD_HOSTNAME: solr-without-toleration-0 (v1:metadata.name) + POD_NAME: solr-without-toleration-0 (v1:metadata.name) + POD_IP: (v1:status.podIP) + GOVERNING_SERVICE: solr-without-toleration-pods + POD_NAMESPACE: demo (v1:metadata.namespace) + SOLR_HOST: $(POD_NAME).$(GOVERNING_SERVICE).$(POD_NAMESPACE) + ZK_HOST: zoo-0.zoo-pods.demo.svc.cluster.local:2181,zoo-1.zoo-pods.demo.svc.cluster.local:2181,zoo-2.zoo-pods.demo.svc.cluster.local:2181/demosolr-without-toleration + ZK_SERVER: zoo-0.zoo-pods.demo.svc.cluster.local:2181,zoo-1.zoo-pods.demo.svc.cluster.local:2181,zoo-2.zoo-pods.demo.svc.cluster.local:2181 + ZK_CHROOT: /demosolr-without-toleration + SOLR_MODULES: + JAVA_OPTS: + CONNECTION_SCHEME: http + SECURITY_ENABLED: true + SOLR_USER: Optional: false + SOLR_PASSWORD: Optional: false + SOLR_OPTS: -DhostPort=$(SOLR_NODE_PORT) -Dsolr.autoSoftCommit.maxTime=1000 -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft -DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt 
-DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + ZK_CREDS_AND_ACLS: -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft -DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt -DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + SOLR_ZK_CREDS_AND_ACLS: -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft -DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt -DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + Mounts: + /temp-config from default-config (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-n9nxh (ro) + /var/security from auth-config (rw) + /var/solr from slconfig (rw) +Containers: + solr: + Image: ghcr.io/appscode-images/solr:9.6.1@sha256:b625c7e8c91c8070b23b367cc03a736f2c5c2cb9cfd7981f72c461e57df800a1 + Port: 8983/TCP + Host Port: 0/TCP + SeccompProfile: RuntimeDefault + Limits: + memory: 2Gi + Requests: + cpu: 900m + memory: 2Gi + Environment: + SOLR_JAVA_MEM: -Xms1g -Xmx3g + SOLR_HOME: /var/solr + SOLR_PORT: 8983 + SOLR_NODE_PORT: 8983 + SOLR_logS_DIR: /var/solr/logs + SOLR_log_LEVEL: DEBUG + SOLR_PORT_ADVERTISE: 8983 + CLUSTER_NAME: solr-without-toleration + POD_HOSTNAME: solr-without-toleration-0 (v1:metadata.name) + POD_NAME: solr-without-toleration-0 (v1:metadata.name) + POD_IP: (v1:status.podIP) + GOVERNING_SERVICE: solr-without-toleration-pods + POD_NAMESPACE: demo (v1:metadata.namespace) + SOLR_HOST: $(POD_NAME).$(GOVERNING_SERVICE).$(POD_NAMESPACE) + ZK_HOST: zoo-0.zoo-pods.demo.svc.cluster.local:2181,zoo-1.zoo-pods.demo.svc.cluster.local:2181,zoo-2.zoo-pods.demo.svc.cluster.local:2181/demosolr-without-toleration + ZK_SERVER: zoo-0.zoo-pods.demo.svc.cluster.local:2181,zoo-1.zoo-pods.demo.svc.cluster.local:2181,zoo-2.zoo-pods.demo.svc.cluster.local:2181 + ZK_CHROOT: /demosolr-without-toleration + SOLR_MODULES: + JAVA_OPTS: + CONNECTION_SCHEME: http + SECURITY_ENABLED: true + SOLR_USER: Optional: false + SOLR_PASSWORD: Optional: false + SOLR_OPTS: -DhostPort=$(SOLR_NODE_PORT) -Dsolr.autoSoftCommit.maxTime=1000 -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft -DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt -DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + ZK_CREDS_AND_ACLS: -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft -DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt -DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + SOLR_ZK_CREDS_AND_ACLS: -DzkACLProvider=org.apache.solr.common.cloud.DigestZkACLProvider -DzkCredentialsInjector=org.apache.solr.common.cloud.VMParamsZkCredentialsInjector -DzkCredentialsProvider=org.apache.solr.common.cloud.DigestZkCredentialsProvider -DzkDigestPassword=nwbnmVwBoJhW)eft 
-DzkDigestReadonlyPassword=7PxFSc)z~DWLL)Tt -DzkDigestReadonlyUsername=zk-digest-readonly -DzkDigestUsername=zk-digest + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-n9nxh (ro) + /var/solr from slconfig (rw) + /var/solr/data from solr-without-toleration-data (rw) +Conditions: + Type Status + PodScheduled False +Volumes: + solr-without-toleration-data: + Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) + ClaimName: solr-without-toleration-data-solr-without-toleration-0 + ReadOnly: false + default-config: + Type: Secret (a volume populated by a Secret) + SecretName: solr-without-toleration-config + Optional: false + slconfig: + Type: EmptyDir (a temporary directory that shares a pod's lifetime) + Medium: + SizeLimit: + auth-config: + Type: Secret (a volume populated by a Secret) + SecretName: solr-without-toleration-auth-config + Optional: false + kube-api-access-n9nxh: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: Burstable +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NotTriggerScaleUp 106s cluster-autoscaler pod didn't trigger scale-up: + Warning FailedScheduling 104s (x2 over 106s) default-scheduler 0/9 nodes are available: 1 node(s) had untolerated taint {key1: node1}, 1 node(s) had untolerated taint {key1: node2}, 1 node(s) had untolerated taint {key1: node3}, 1 node(s) had untolerated taint {key1: node4}, 1 node(s) had untolerated taint {key1: node5}, 1 node(s) had untolerated taint {key1: node6}, 1 node(s) had untolerated taint {key1: node7}, 1 node(s) had untolerated taint {key1: node8}, 1 node(s) had untolerated taint {key1: node9}. preemption: 0/9 nodes are available: 9 Preemption is not helpful for scheduling. +``` +Here we can see that the pod has no tolerations for the tainted nodes and because of that the pod is not able to scheduled. + +So, let's add proper tolerations and create another Solr. Here is the yaml we are going to apply, +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-with-toleration + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node7" + effect: "NoSchedule" + - key: "key1" + operator: "Equal" + value: "node8" + effect: "NoSchedule" + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/Solr/configuration/solr-with-tolerations.yaml +solr.kubedb.com/solr-with-tolerations created +``` +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `sdb-with-tolerations-0` has been created. 
+ +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-with-toleration +NAME READY STATUS RESTARTS AGE +solr-with-toleration-0 1/1 Running 0 2m12s +solr-with-toleration-1 1/1 Running 0 80s +``` +As we see the pod is running, you can verify that by running `kubectl get pods -n demo sdb-with-tolerations-0 -o wide` and looking at the “NODE” to which the Pod was assigned. +```bash +$ kubectl get pod -n demo -l app.kubernetes.io/instance=solr-with-toleration -owide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +solr-with-toleration-0 1/1 Running 0 2m37s 10.12.3.7 gke-pritam-default-pool-dadbf4db-5fv5 +solr-with-toleration-1 1/1 Running 0 105s 10.12.5.5 gke-pritam-default-pool-dadbf4db-5vkv +``` +We can successfully verify that our pod was scheduled to the node which it has tolerations. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solr -n demo solr-misc-config solr-without-toleration solr-with-toleration + +kubectl delete ns demo +``` + +If you would like to uninstall KubeDB operator, please follow the steps [here](/docs/setup/README.md). + + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). \ No newline at end of file diff --git a/docs/guides/solr/monitoring/_index.md b/docs/guides/solr/monitoring/_index.md new file mode 100644 index 0000000000..94235d8ee0 --- /dev/null +++ b/docs/guides/solr/monitoring/_index.md @@ -0,0 +1,10 @@ +--- +title: Monitoring +menu: + docs_{{ .version }}: + identifier: sl-monitoring-solr + name: Monitoring + parent: sl-solr-guides + weight: 36 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/monitoring/overview.md b/docs/guides/solr/monitoring/overview.md new file mode 100644 index 0000000000..7297dbc41a --- /dev/null +++ b/docs/guides/solr/monitoring/overview.md @@ -0,0 +1,108 @@ +--- +title: Overview +menu: + docs_{{ .version }}: + identifier: sl-overview-monitoring-solr + name: Overview + parent: sl-monitoring-solr + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + + + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring Solr with KubeDB + +KubeDB has native support for monitoring via [Prometheus](https://prometheus.io/). You can use builtin [Prometheus](https://github.com/prometheus/prometheus) scraper or [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) to monitor KubeDB managed databases. This tutorial will show you how database monitoring works with KubeDB and how to configure Database crd to enable monitoring. + +## Overview + +KubeDB uses Prometheus [exporter](https://prometheus.io/docs/instrumenting/exporters/#databases) images to export Prometheus metrics for respective databases. Following diagram shows the logical flow of database monitoring with KubeDB. + +

  Database Monitoring Flow

+ +When a user creates a database crd with `spec.monitor` section configured, KubeDB operator provisions the respective database and injects an exporter image as sidecar to the database pod. It also creates a dedicated stats service with name `{database-crd-name}-stats` for monitoring. Prometheus server can scrape metrics using this stats service. + +## Configure Monitoring + +In order to enable monitoring for a database, you have to configure `spec.monitor` section. KubeDB provides following options to configure `spec.monitor` section: + +| Field | Type | Uses | +| -------------------------------------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `spec.monitor.agent` | `Required` | Type of the monitoring agent that will be used to monitor this database. It can be `prometheus.io/builtin` or `prometheus.io/operator`. | +| `spec.monitor.prometheus.exporter.port` | `Optional` | Port number where the exporter side car will serve metrics. | +| `spec.monitor.prometheus.exporter.args` | `Optional` | Arguments to pass to the exporter sidecar. | +| `spec.monitor.prometheus.exporter.env` | `Optional` | List of environment variables to set in the exporter sidecar container. | +| `spec.monitor.prometheus.exporter.resources` | `Optional` | Resources required by exporter sidecar container. | +| `spec.monitor.prometheus.exporter.securityContext` | `Optional` | Security options the exporter should run with. | +| `spec.monitor.prometheus.serviceMonitor.labels` | `Optional` | Labels for `ServiceMonitor` crd. | +| `spec.monitor.prometheus.serviceMonitor.interval` | `Optional` | Interval at which metrics should be scraped. | + +## Sample Configuration + +A sample YAML for Redis crd with `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below. + +```yaml +apiVersion: kubedb.com/v1 +kind: Redis +metadata: + name: sample-redis + namespace: databases +spec: + version: 6.0.20 + deletionPolicy: WipeOut + configSecret: # configure Redis to use password for authentication + name: redis-config + storageType: Durable + storage: + storageClassName: default + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + exporter: + args: + - --redis.password=$(REDIS_PASSWORD) + env: + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: _name_of_secret_with_redis_password + key: password # key with the password + resources: + requests: + memory: 512Mi + cpu: 200m + limits: + memory: 512Mi + cpu: 250m + securityContext: + runAsUser: 2000 + allowPrivilegeEscalation: false +``` + +Assume that above Redis is configured to use basic authentication. So, exporter image also need to provide password to collect metrics. We have provided it through `spec.monitor.args` field. + +Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in `monitoring` namespace and this `ServiceMonitor` will have `release: prometheus` label. 
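The same `spec.monitor` stanza works for Solr. Below is a minimal sketch of a Solr CR with Prometheus operator monitoring enabled — the object name, ZooKeeper reference and storage values are placeholders, and the Prometheus operator and builtin Prometheus guides in this section contain complete, tested manifests.

```yaml
apiVersion: kubedb.com/v1alpha2
kind: Solr
metadata:
  name: sample-solr            # placeholder name
  namespace: demo
spec:
  version: 9.6.1
  replicas: 2
  zookeeperRef:                # Solr requires an external ZooKeeper ensemble
    name: zoo
    namespace: demo
  monitor:
    agent: prometheus.io/operator
    prometheus:
      serviceMonitor:
        labels:
          release: prometheus  # must match the Prometheus server's serviceMonitorSelector
        interval: 10s
  storage:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
```

With this stanza in place, KubeDB creates the `sample-solr-stats` service and a matching `ServiceMonitor`, just as described above.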
+ +## Next Steps + +- Learn how to monitor Elasticsearch database with KubeDB using [builtin-Prometheus](/docs/guides/elasticsearch/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md). +- Learn how to monitor PostgreSQL database with KubeDB using [builtin-Prometheus](/docs/guides/postgres/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/postgres/monitoring/using-prometheus-operator.md). +- Learn how to monitor MySQL database with KubeDB using [builtin-Prometheus](/docs/guides/mysql/monitoring/builtin-prometheus/index.md) and using [Prometheus operator](/docs/guides/mysql/monitoring/prometheus-operator/index.md). +- Learn how to monitor MongoDB database with KubeDB using [builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md). +- Learn how to monitor Redis server with KubeDB using [builtin-Prometheus](/docs/guides/redis/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/redis/monitoring/using-prometheus-operator.md). +- Learn how to monitor Memcached server with KubeDB using [builtin-Prometheus](/docs/guides/memcached/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/memcached/monitoring/using-prometheus-operator.md). diff --git a/docs/guides/solr/monitoring/prometheus-builtin.md b/docs/guides/solr/monitoring/prometheus-builtin.md new file mode 100644 index 0000000000..161d6c90b3 --- /dev/null +++ b/docs/guides/solr/monitoring/prometheus-builtin.md @@ -0,0 +1,364 @@ +--- +title: Prometheus Builtin +menu: + docs_{{ .version }}: + identifier: sl-promtheus-builtin-monitoring-solr + name: Prometheus Builtin + parent: sl-monitoring-solr + weight: 50 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring Solr with builtin Prometheus + +This tutorial will show you how to monitor Solr database using builtin [Prometheus](https://github.com/prometheus/prometheus) scraper. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- If you are not familiar with how to configure Prometheus to scrape metrics from various Kubernetes resources, please read the tutorial from [here](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin). + +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/elasticsearch/monitoring/overview.md). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. + + ```bash + $ kubectl create ns monitoring + namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/elasticsearch) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). 
+ +## Deploy Solr with Monitoring Enabled + +At first, let's deploy an Elasticsearch database with monitoring enabled. Below is the Elasticsearch object that we are going to create. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: builtin-prom-sl + namespace: demo +spec: + version: 9.6.1 + replicas: 2 + enableSSL: true + monitor: + agent: prometheus.io/builtin + solrModules: + - prometheus-exporter + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + +``` + +Here, + +- `spec.monitor.agent: prometheus.io/builtin` specifies that we are going to monitor this server using builtin Prometheus scraper. + +Let's create the Elasticsearch crd we have shown above. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/monitoring/solr-builtin.yaml +solr.kubedb.com/builtin-prom-sl created +``` + +Now, wait for the database to go into `Running` state. + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +builtin-prom-sl kubedb.com/v1alpha2 9.6.1 Ready 59m +``` + +KubeDB will create a separate stats service with name `{Solr crd name}-stats` for monitoring purpose. + +```bash +$ kubectl get svc -n demo -l 'app.kubernetes.io/instance=builtin-prom-sl' +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +builtin-prom-sl ClusterIP 10.96.94.160 8983/TCP 59m +builtin-prom-sl-pods ClusterIP None 8983/TCP 59m +builtin-prom-sl-stats ClusterIP 10.96.157.93 9854/TCP 59m +``` + +Here, `builtin-prom-sl-stats` service has been created for monitoring purpose. Let's describe the service. + +```bash +$ kubectl describe svc -n demo builtin-prom-sl-stats +Name: builtin-prom-sl-stats +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=builtin-prom-sl + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com + kubedb.com/role=stats +Annotations: monitoring.appscode.com/agent: prometheus.io/builtin + prometheus.io/path: /metrics + prometheus.io/port: 9854 + prometheus.io/scrape: true +Selector: app.kubernetes.io/instance=builtin-prom-sl,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=solrs.kubedb.com +Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.96.157.93 +IPs: 10.96.157.93 +Port: metrics 9854/TCP +TargetPort: metrics/TCP +Endpoints: 10.244.0.54:9854,10.244.0.56:9854 +Session Affinity: None +Events: +``` + +You can see that the service contains following annotations. + +```bash +prometheus.io/path: /metrics +prometheus.io/port: 9854 +prometheus.io/scrape: true +``` + +The Prometheus server will discover the service endpoint using these specifications and will scrape metrics from the exporter. + +## Configure Prometheus Server + +Now, we have to configure a Prometheus scraping job to scrape the metrics using this service. We are going to configure scraping job similar to this [kubernetes-service-endpoints](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin#kubernetes-service-endpoints) job that scrapes metrics from endpoints of a service. + +Let's configure a Prometheus scraping job to collect metrics from this service. + +```yaml +- job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. 
+ # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. + - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +### Configure Existing Prometheus Server + +If you already have a Prometheus server running, you have to add above scraping job in the `ConfigMap` used to configure the Prometheus server. Then, you have to restart it for the updated configuration to take effect. + +>If you don't use a persistent volume for Prometheus storage, you will lose your previously scraped data on restart. + +### Deploy New Prometheus Server + +If you don't have any existing Prometheus server running, you have to deploy one. In this section, we are going to deploy a Prometheus server in `monitoring` namespace to collect metrics using this stats service. + +**Create ConfigMap:** + +At first, create a ConfigMap with the scraping configuration. Bellow, the YAML of ConfigMap that we are going to create in this tutorial. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config + labels: + app: prometheus-demo + namespace: monitoring +data: + prometheus.yml: |- + global: + scrape_interval: 5s + evaluation_interval: 5s + scrape_configs: + - job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. 
+ # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. + - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +Let's create above `ConfigMap`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/builtin-prometheus/prom-config.yaml +configmap/prometheus-config created +``` + +**Create RBAC:** + +If you are using an RBAC enabled cluster, you have to give necessary RBAC permissions for Prometheus. Let's create necessary RBAC stuffs for Prometheus, + +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/rbac.yaml +clusterrole.rbac.authorization.k8s.io/prometheus created +serviceaccount/prometheus created +clusterrolebinding.rbac.authorization.k8s.io/prometheus created +``` + +>YAML for the RBAC resources created above can be found [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/rbac.yaml). + +**Deploy Prometheus:** + +Now, we are ready to deploy Prometheus server. We are going to use following [deployment](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/deployment.yaml) to deploy Prometheus server. + +Let's deploy the Prometheus server. 
+ +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/deployment.yaml +deployment.apps/prometheus created +``` + +### Verify Monitoring Metrics + +Prometheus server is listening to port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. + +At first, let's check if the Prometheus pod is in `Running` state. + +```bash +$ kubectl get pod -n monitoring -l=app=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-8568c86d86-95zhn 1/1 Running 0 77s +``` + +Now, run following command on a separate terminal to forward 9090 port of `prometheus-8568c86d86-95zhn` pod, + +```bash +$ kubectl port-forward -n monitoring prometheus-8568c86d86-95zhn 9090 +Forwarding from 127.0.0.1:9090 -> 9090 +Forwarding from [::1]:9090 -> 9090 +``` + +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see the endpoint of `builtin-prom-es-stats` service as one of the targets. + +
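If you prefer the command line over the browser, you can also query the Prometheus HTTP API through the same port-forward. This is only a quick check and assumes `curl` and `jq` are available on your workstation:

```bash
# List the services behind the currently active scrape targets
$ curl -s http://localhost:9090/api/v1/targets \
    | jq -r '.data.activeTargets[].labels.service' \
    | sort -u
```

The Solr stats service `builtin-prom-sl-stats` should appear in this list once Prometheus has picked up the scrape configuration.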

  Prometheus Target

+ +Check the labels in image. These labels confirm that the metrics are coming from `Solr` database `builtin-prom-sl` through stats service `builtin-prom-sls-stats`. + +Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run following commands + +```bash +$ kubectl delete -n demo es/builtin-prom-es + +$ kubectl delete -n monitoring deployment.apps/prometheus + +$ kubectl delete -n monitoring clusterrole.rbac.authorization.k8s.io/prometheus +$ kubectl delete -n monitoring serviceaccount/prometheus +$ kubectl delete -n monitoring clusterrolebinding.rbac.authorization.k8s.io/prometheus + +$ kubectl delete ns demo +$ kubectl delete ns monitoring +``` \ No newline at end of file diff --git a/docs/guides/solr/monitoring/prometheus-operator.md b/docs/guides/solr/monitoring/prometheus-operator.md new file mode 100644 index 0000000000..cf416c3b3f --- /dev/null +++ b/docs/guides/solr/monitoring/prometheus-operator.md @@ -0,0 +1,377 @@ +--- +title: Prometheus Operator +menu: + docs_{{ .version }}: + identifier: sl-promtheus-operator-monitoring-solr + name: Prometheus Operator + parent: sl-monitoring-solr + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring Solr Using Prometheus operator + +[Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) provides simple and Kubernetes native way to deploy and configure Prometheus server. This tutorial will show you how to use Prometheus operator to monitor Elasticsearch database deployed with KubeDB. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/elasticsearch/monitoring/overview.md). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. + +```bash + $ kubectl create ns monitoring + namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created + ``` + +- We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, deploy one following the docs from [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/operator/README.md). + +- If you already don't have a Prometheus server running, deploy one following tutorial from [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/operator/README.md#deploy-prometheus-server). + +> Note: YAML files used in this tutorial are stored in [docs/examples/elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/elasticsearch) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Find out required labels for ServiceMonitor + +We need to know the labels used to select `ServiceMonitor` by a `Prometheus` crd. 
We are going to provide these labels in `spec.monitor.prometheus.labels` field of Elasticsearch crd so that KubeDB creates `ServiceMonitor` object accordingly. + +At first, let's find out the available Prometheus server in our cluster. + +```bash +$ kubectl get prometheus -A +NAMESPACE NAME VERSION DESIRED READY RECONCILED AVAILABLE AGE +monitoring prometheus-kube-prometheus-prometheus v2.54.1 1 1 True True 11d +``` + +> If you don't have any Prometheus server running in your cluster, deploy one following the guide specified in **Before You Begin** section. + +Now, let's view the YAML of the available Prometheus server `prometheus` in `monitoring` namespace. + +```bash +$ kubectl get prometheus -n monitoring prometheus-kube-prometheus-prometheus -oyaml +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + annotations: + meta.helm.sh/release-name: prometheus + meta.helm.sh/release-namespace: monitoring + creationTimestamp: "2024-10-17T08:16:24Z" + generation: 1 + labels: + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 65.2.0 + chart: kube-prometheus-stack-65.2.0 + heritage: Helm + release: prometheus + name: prometheus-kube-prometheus-prometheus + namespace: monitoring + resourceVersion: "632331" + uid: 9cbf73fe-d9b5-456d-becf-770e07e298af +spec: + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-kube-prometheus-alertmanager + namespace: monitoring + pathPrefix: / + port: http-web + automountServiceAccountToken: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: http://prometheus-kube-prometheus-prometheus.monitoring:9090 + hostNetwork: false + image: quay.io/prometheus/prometheus:v2.54.1 + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: + matchLabels: + release: prometheus + portName: http-web + probeNamespaceSelector: {} + probeSelector: + matchLabels: + release: prometheus + replicas: 1 + retention: 10d + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: + matchLabels: + release: prometheus + scrapeConfigNamespaceSelector: {} + scrapeConfigSelector: + matchLabels: + release: prometheus + scrapeInterval: 30s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + serviceAccountName: prometheus-kube-prometheus-prometheus + serviceMonitorNamespaceSelector: {} + serviceMonitorSelector: + matchLabels: + release: prometheus + shards: 1 + tsdb: + outOfOrderTimeWindow: 0s + version: v2.54.1 + walCompression: true +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-10-29T06:44:10Z" + message: "" + observedGeneration: 1 + reason: "" + status: "True" + type: Available + - lastTransitionTime: "2024-10-29T06:44:10Z" + message: "" + observedGeneration: 1 + reason: "" + status: "True" + type: Reconciled + paused: false + replicas: 1 + selector: app.kubernetes.io/instance=prometheus-kube-prometheus-prometheus,app.kubernetes.io/managed-by=prometheus-operator,app.kubernetes.io/name=prometheus,operator.prometheus.io/name=prometheus-kube-prometheus-prometheus,prometheus=prometheus-kube-prometheus-prometheus + shardStatuses: + - availableReplicas: 1 + replicas: 1 + shardID: "0" + unavailableReplicas: 0 + updatedReplicas: 1 + shards: 1 + unavailableReplicas: 0 + updatedReplicas: 1 +``` + +Notice the 
`spec.serviceMonitorSelector` section. Here, `release: prometheus` label is used to select `ServiceMonitor` crd. So, we are going to use this label in `spec.monitor.prometheus.labels` field of Solr crd. + +## Deploy Solr with Monitoring Enabled + +At first, let's deploy an Solr database with monitoring enabled. Below is the Solr object that we are going to create. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: operator-prom-sl + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s + solrModules: + - s3-repository + - gcs-repository + - prometheus-exporter + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + +``` + +Here, + +- `monitor.agent: prometheus.io/operator` indicates that we are going to monitor this server using Prometheus operator. +- `monitor.prometheus.namespace: monitoring` specifies that KubeDB should create `ServiceMonitor` in `monitoring` namespace. + +- `monitor.prometheus.labels` specifies that KubeDB should create `ServiceMonitor` with these labels. + +- `monitor.prometheus.interval` indicates that the Prometheus server should scrape metrics from this database with 10 seconds interval. + +Let's create the Elasticsearch object that we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/monitoring/solr-operator.yaml +solr.kubedb.com/operator-prom-sl created +``` + +Now, wait for the database to go into `Running` state. + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +operator-prom-sl kubedb.com/v1alpha2 9.6.1 Ready 104m +``` + +KubeDB will create a separate stats service with name `{Solr crd name}-stats` for monitoring purpose. + +```bash +$ kubectl get svc -n demo -l 'app.kubernetes.io/instance=operator-prom-sl' +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +operator-prom-sl ClusterIP 10.96.76.207 8983/TCP 122m +operator-prom-sl-pods ClusterIP None 8983/TCP 122m +operator-prom-sl-stats ClusterIP 10.96.192.50 9854/TCP 122m +``` + +Here, `operator-prom-sl-stats` service has been created for monitoring purpose. + +Let's describe this stats service. + +```bash +$ kubectl describe svc -n demo operator-prom-sl-stats +Name: operator-prom-sl-stats +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=operator-prom-sl + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com + kubedb.com/role=stats +Annotations: monitoring.appscode.com/agent: prometheus.io/operator +Selector: app.kubernetes.io/instance=operator-prom-sl,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=solrs.kubedb.com +Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.96.192.50 +IPs: 10.96.192.50 +Port: metrics 9854/TCP +TargetPort: metrics/TCP +Endpoints: 10.244.0.37:9854,10.244.0.39:9854 +Session Affinity: None +Events: +``` + +Notice the `Labels` and `Port` fields. `ServiceMonitor` will use these information to target its endpoints. + +KubeDB will also create a `ServiceMonitor` crd in `monitoring` namespace that select the endpoints of `coreos-prom-es-stats` service. Verify that the `ServiceMonitor` crd has been created. 
+ +```bash +$ kubectl get servicemonitor -n demo +NAME AGE +operator-prom-sl-stats 125m +``` + +Let's verify that the `ServiceMonitor` has the label that we had specified in `spec.monitor` section of Elasticsearch crd. + +```bash +$ kubectl get servicemonitor -n demo operator-prom-sl-stats -oyaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + creationTimestamp: "2024-10-29T06:44:10Z" + generation: 1 + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: operator-prom-sl + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: solrs.kubedb.com + release: prometheus + name: operator-prom-sl-stats + namespace: demo + ownerReferences: + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: operator-prom-sl-stats + uid: 6728f31e-8840-45ff-b6bd-f9c5839c74a6 + resourceVersion: "632313" + uid: 74194ab9-d148-4d84-b973-764606f5f7b6 +spec: + endpoints: + - honorLabels: true + interval: 10s + path: /metrics + port: metrics + namespaceSelector: + matchNames: + - demo + selector: + matchLabels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: operator-prom-sl + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: solrs.kubedb.com + kubedb.com/role: stats +``` + +Notice that the `ServiceMonitor` has label `release: prometheus` that we had specified in Solr crd. + +Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `operator-prom-sl-stats` service. It also, target the `prom-http` port that we have seen in the stats service. + +## Verify Monitoring Metrics + +At first, let's find out the respective Prometheus pod for `prometheus` Prometheus server. + +```bash +$ kubectl get pod -n monitoring -l=release=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-kube-prometheus-operator-6c8698f59d-cljvq 1/1 Running 12 (4h11m ago) 12d +prometheus-kube-state-metrics-5548456c74-ksh5n 1/1 Running 13 (4h10m ago) 12d +prometheus-prometheus-node-exporter-n5ht8 1/1 Running 9 (4h11m ago) 12d +``` + +Prometheus server is listening to port `9090` of `prometheus-kube-prometheus-operator-6c8698f59d-cljvq` pod. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. + +Run following command on a separate terminal to forward the port 9090 of `prometheus-prometheus-0` pod, + +```bash +$ kubectl port-forward -n monitoring prometheus-kube-prometheus-operator-6c8698f59d-cljvq 9090 +Forwarding from 127.0.0.1:9090 -> 9090 +Forwarding from [::1]:9090 -> 9090 +``` + +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see `prom-http` endpoint of `operator-prom-sl-stats` service as one of the targets. + +
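Independently of the Prometheus UI, you can confirm that the exporter sidecar itself is serving metrics by port-forwarding the stats service directly. This is a quick smoke test; the exact metric names in the output depend on the Solr version:

```bash
# Forward the stats service port to your workstation
$ kubectl port-forward -n demo svc/operator-prom-sl-stats 9854

# From another terminal, fetch a few lines of exporter output
$ curl -s http://localhost:9854/metrics | head -n 5
```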

  Prometheus Target

+ +Check the `endpoint` and `service` labels marked by red rectangle. It verifies that the target is our expected database. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run following commands + +```bash +# cleanup database +kubectl delete -n demo sl/operator-prom-sl + +# cleanup prometheus resources +kubectl delete -n monitoring prometheus prometheus +kubectl delete -n monitoring clusterrolebinding prometheus +kubectl delete -n monitoring clusterrole prometheus +kubectl delete -n monitoring serviceaccount prometheus +kubectl delete -n monitoring service prometheus-operated + +# cleanup prometheus operator resources +kubectl delete -n monitoring deployment prometheus-operator +kubectl delete -n dmeo serviceaccount prometheus-operator +kubectl delete clusterrolebinding prometheus-operator +kubectl delete clusterrole prometheus-operator + +# delete namespace +kubectl delete ns monitoring +kubectl delete ns demo +``` diff --git a/docs/guides/solr/quickstart/overview/index.md b/docs/guides/solr/quickstart/overview/index.md index 2622cd6a2b..baabed70e6 100644 --- a/docs/guides/solr/quickstart/overview/index.md +++ b/docs/guides/solr/quickstart/overview/index.md @@ -116,13 +116,13 @@ Let's create the ZooKeeper CR that is shown above: ```bash $ $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/quickstart/overview/yamls/zookeeper/zookeeper.yaml -ZooKeeper.kubedb.com/es-quickstart created +zooKeeper.kubedb.com/zoo-com created ``` The ZooKeeper's `STATUS` will go from `Provisioning` to `Ready` state within few minutes. Once the `STATUS` is `Ready`, you are ready to use the database. ```bash -$ kubectl get ZooKeeper -n demo -w +$ kubectl get zookeeper -n demo -w NAME TYPE VERSION STATUS AGE zoo-com kubedb.com/v1alpha2 3.7.2 Ready 13m ``` diff --git a/docs/guides/solr/reconfigure-tls/_index.md b/docs/guides/solr/reconfigure-tls/_index.md new file mode 100644 index 0000000000..b3abd6022e --- /dev/null +++ b/docs/guides/solr/reconfigure-tls/_index.md @@ -0,0 +1,10 @@ +--- +title: Run Solr With TLS +menu: + docs_{{ .version }}: + identifier: sl-reconfigure-tls + name: Reconfigure TLS + parent: sl-solr-guides + weight: 32 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/reconfigure-tls/overview.md b/docs/guides/solr/reconfigure-tls/overview.md new file mode 100644 index 0000000000..8c7057fd0c --- /dev/null +++ b/docs/guides/solr/reconfigure-tls/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring TLS/SSL +menu: + docs_{{ .version }}: + identifier: sl-reconfigure-tls-overview + name: Overview + parent: sl-reconfigure-tls + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring TLS of Solr + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures TLS configuration i.e. add TLS, remove TLS, update issuer/cluster issuer or Certificates and rotate the certificates of `Solr`. 
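Each of these operations is requested through a `SolrOpsRequest` of type `ReconfigureTLS`. The sketch below only illustrates the general shape of such a request — the issuer and object names are placeholders, and the next doc walks through complete, tested manifests:

```yaml
apiVersion: ops.kubedb.com/v1alpha1
kind: SolrOpsRequest
metadata:
  name: slops-add-tls          # placeholder name
  namespace: demo
spec:
  type: ReconfigureTLS
  databaseRef:
    name: solr-cluster         # the Solr object whose TLS settings will change
  tls:
    issuerRef:                 # cert-manager Issuer/ClusterIssuer that signs the certificates
      apiGroup: cert-manager.io
      kind: Issuer
      name: solr-issuer
```

Removing TLS and rotating certificates follow the same pattern, typically via the `spec.tls.remove` and `spec.tls.rotateCertificates` fields of the same CR.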
+ +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + +## How Reconfiguring Solr TLS Configuration Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures TLS of a `Solr`. Open the image in a new tab to see the enlarged version. + +
+  Reconfiguring TLS process of Solr +
Fig: Reconfiguring TLS process of Solr
+
+ +The Reconfiguring Solr TLS process consists of the following steps: + +1. At first, a user creates a `Solr` Custom Resource Object (CRO). + +2. `KubeDB` Provisioner operator watches the `Solr` CRO. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure the TLS configuration of the `Solr` database the user creates a `SolrOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR. + +6. When it finds a `SolrOpsRequest` CR, it pauses the `Solr` object which is referred from the `SolrOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Solr` object during the reconfiguring TLS process. + +7. Then the `KubeDB` Ops-manager operator will add, remove, update or rotate TLS configuration based on the Ops Request yaml. + +8. Then the `KubeDB` Ops-manager operator will restart all the Pods of the database so that they restart with the new TLS configuration defined in the `SolrOpsRequest` CR. + +9. After the successful reconfiguring of the `Solr` TLS, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on reconfiguring TLS configuration of a Solr database using `SolrOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/solr/reconfigure-tls/solr.md b/docs/guides/solr/reconfigure-tls/solr.md new file mode 100644 index 0000000000..cc49acc024 --- /dev/null +++ b/docs/guides/solr/reconfigure-tls/solr.md @@ -0,0 +1,976 @@ +--- +title: Reconfiguring TLS/SSL +menu: + docs_{{ .version }}: + identifier: sl-reconfigure-tls-solr + name: Reconfigure Solr TLS/SSL Encryption + parent: sl-reconfigure-tls + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Solr TLS/SSL (Transport Encryption) + +KubeDB supports reconfigure i.e. add, remove, update and rotation of TLS/SSL certificates for existing Solr database via a SolrOpsRequest. This tutorial will show you how to use KubeDB to reconfigure TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Solr) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Add TLS to a Solr database + +Here, We are going to create a Solr without TLS and then reconfigure the database to use TLS. + +### Deploy Solr without TLS + +In this section, we are going to deploy a Solr topology cluster without TLS. 
In the next few sections we will reconfigure TLS using the `SolrOpsRequest` CRD. Below is the YAML of the `Solr` CR that we are going to create,

```yaml
apiVersion: kubedb.com/v1alpha2
kind: Solr
metadata:
  name: solr-cluster
  namespace: demo
spec:
  deletionPolicy: DoNotTerminate
  version: 9.6.1
  zookeeperRef:
    name: zoo-com
    namespace: demo
  topology:
    overseer:
      replicas: 1
      storage:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: standard
    data:
      replicas: 2
      storage:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: standard
    coordinator:
      storage:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: standard
```

Let's create the `Solr` CR we have shown above,

```bash
$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/solr/clustering/yamls/topology.yaml
solr.kubedb.com/solr-cluster created
```

Now, wait until `solr-cluster` has status `Ready`, i.e.,

```bash
$ kubectl get sl -n demo
NAME           TYPE                  VERSION   STATUS   AGE
solr-cluster   kubedb.com/v1alpha2   9.6.1     Ready    148m
```

Now, we can exec into one of the Solr data pods and verify from its environment that TLS is disabled.

```bash
$ kubectl exec -it -n demo solr-cluster-data-0 -- env | grep SSL
Defaulted container "solr" out of: solr, init-solr (init)
```

We can verify from the above output that TLS is disabled for this cluster.

### Create Issuer/ClusterIssuer

Now, we are going to create an example `Issuer` that will be used to enable SSL/TLS in Solr. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`.

- Start off by generating a CA certificate using OpenSSL.

```bash
$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca /O=kubedb"
Generating a RSA private key
................+++++
........................+++++
writing new private key to './ca.key'
-----
```

- Now we are going to create a ca-secret using the certificate files that we have just generated.

```bash
$ kubectl create secret tls solr-ca \
    --cert=ca.crt \
    --key=ca.key \
    --namespace=demo
secret/solr-ca created
```

Now, let's create an `Issuer` using the `solr-ca` secret that we have just created. The `YAML` file looks like this:

```yaml
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: solr-ca-issuer
  namespace: demo
spec:
  ca:
    secretName: solr-ca
```

Let's apply the `YAML` file:

```bash
$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/tls/sl-issuer.yaml
issuer.cert-manager.io/solr-ca-issuer created
```

### Create SolrOpsRequest

In order to add TLS to the Solr cluster, we have to create a `SolrOpsRequest` CRO referencing our newly created issuer.
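Before creating the ops request, it is worth confirming that cert-manager has marked the issuer as ready; the sample output below is indicative and may differ slightly depending on your cert-manager version.

```bash
$ kubectl get issuer -n demo solr-ca-issuer
NAME             READY   AGE
solr-ca-issuer   True    15s
```

With the issuer ready, we can go ahead and create the ops request.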
Below is the YAML of the `SolrOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-add-tls + namespace: demo +spec: + apply: IfReady + tls: + issuerRef: + apiGroup: cert-manager.io + name: solr-ca-issuer + kind: Issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + databaseRef: + name: solr-cluster + type: ReconfigureTLS +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on Solr. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/solr/concepts/solr.md#spectls). + +Let's create the `SolrOpsRequest` CR we have shown above, + +> **Note:** For combined Solr, you just need to refer solr combined object in `databaseRef` field. To learn more about combined solr, please visit [here](/docs/guides/solr/clustering/combined_cluster.md). + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure-tls/add-tls.yaml +Solropsrequest.ops.kubedb.com/slops-add-tls created +``` + +#### Verify TLS Enabled Successfully + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CRO, + +```bash +$ kubectl get Solropsrequest -n demo +NAME TYPE STATUS AGE +slops-add-tls ReconfigureTLS Successful 4m36s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed. 
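If you prefer a quick scriptable check over reading the whole object, the phase can be read straight from the status of the ops request; this is a minimal sketch that assumes the request is named `slops-add-tls` as above.

```bash
# Print only the phase of the ops request; expect "Successful" once TLS has been added
$ kubectl get solropsrequest -n demo slops-add-tls -o jsonpath='{.status.phase}{"\n"}'
Successful
```

The full `kubectl describe` output below shows each condition the operator went through.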
+ +```bash +$ kubectl describe slops -n demo slops-add-tls +Name: slops-add-tls +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T09:05:19Z + Generation: 1 + Resource Version: 1533152 + UID: 4f057ed5-33be-4753-85ce-a16e2915c6f3 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Tls: + Certificates: + Alias: server + Dns Names: + localhost + Ip Addresses: + 127.0.0.1 + Subject: + Organizations: + kubedb:server + Issuer Ref: + API Group: cert-manager.io + Kind: ClusterIssuer + Name: self-signed-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T09:05:19Z + Message: Solr ops-request has started to reconfigure tls for solr nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T09:05:32Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T09:05:27Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T09:05:27Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T09:05:27Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T09:05:38Z + Message: successfully reconciled the Solr with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T09:08:13Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T09:05:43Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T09:05:43Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T09:05:48Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T09:06:33Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T09:06:33Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T09:07:23Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T09:07:23Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T09:08:13Z + Message: Successfully completed reconfigureTLS for solr. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + +Now, Let's exec into a Solr broker pod and verify the configuration that the TLS is enabled. + +```bash + $ kubectl exec -it -n demo solr-cluster-data-0 -- env | grep -i ssl +Defaulted container "solr" out of: solr, init-solr (init) +JAVA_OPTS= -Djavax.net.ssl.trustStore=/var/solr/etc/truststore.p12 -Djavax.net.ssl.trustStorePassword=Ni5tEgfjahzS53D3 -Djavax.net.ssl.keyStore=/var/solr/etc/keystore.p12 -Djavax.net.ssl.keyStorePassword=Ni5tEgfjahzS53D3 -Djavax.net.ssl.keyStoreType=PKCS12 -Djavax.net.ssl.trustStoreType=PKCS12 +SOLR_SSL_KEY_STORE_PASSWORD=Ni5tEgfjahzS53D3 +SOLR_SSL_TRUST_STORE=/var/solr/etc/truststore.p12 +SOLR_SSL_KEY_STORE=/var/solr/etc/keystore.p12 +SOLR_SSL_WANT_CLIENT_AUTH=false +SOLR_SSL_ENABLED=true +SOLR_SSL_TRUST_STORE_PASSWORD=Ni5tEgfjahzS53D3 +SOLR_SSL_NEED_CLIENT_AUTH=false +``` + +We can see from the above output that, keystore location is `/var/solr/etc/keystore.p12` which means that TLS is enabled. + +## Rotate Certificate + +Now we are going to rotate the certificate of this cluster. First let's check the current expiration date of the certificate. + +```bash +$ $ kubectl exec -it -n demo solr-cluster-data-0 -- keytool -list -v -keystore /var/solr/etc/keystore.p12 -storepass Ni5tEgfjahzS53D3 | grep -E 'Valid from|Alias name' +Alias name: 1 +Valid from: Mon Nov 04 09:05:23 UTC 2024 until: Sun Feb 02 09:05:23 UTC 2025 +Valid from: Thu Aug 15 05:59:09 UTC 2024 until: Fri Aug 15 05:59:09 UTC 2025 + +``` + +So, the certificate will expire on this time `Sun Feb 02 09:05:23 UTC 2025`. + +### Create SolrOpsRequest + +Now we are going to increase it using a SolrOpsRequest. Below is the yaml of the ops request that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + rotateCertificates: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `solr-cluster`. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our cluster. +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this Solr cluster. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure-tls/rotate-tls.yaml +Solropsrequest.ops.kubedb.com/slops-rotate created +``` + +#### Verify Certificate Rotated Successfully + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CRO, + +```bash +$ kubectl get slops -n demo slops-rotate +NAME TYPE STATUS AGE +slops-rotate ReconfigureTLS Successful 32m +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed. 
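Because KubeDB provisions the TLS certificates through cert-manager, you can also glance at the `Certificate` resources in the namespace while the rotation runs; the resource name shown below is an assumption based on the usual `<solr-name>-server-cert` naming and may differ in your cluster.

```bash
$ kubectl get certificate -n demo
NAME                       READY   SECRET                     AGE
solr-cluster-server-cert   True    solr-cluster-server-cert   3h12m
```

The describe output below lists the conditions the operator recorded while rotating the certificates.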
+ +```bash +$ kubectl describe slops -n demo slops-rotate +Name: slops-rotate +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T12:20:18Z + Generation: 1 + Resource Version: 1550013 + UID: 0a9e1d2c-f322-4f7d-8344-43440456331b +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Tls: + Rotate Certificates: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T12:20:18Z + Message: Solr ops-request has started to reconfigure tls for solr nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T12:20:31Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T12:20:26Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T12:20:26Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T12:20:26Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T12:20:37Z + Message: successfully reconciled the Solr with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T12:23:07Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T12:20:42Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T12:20:42Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T12:20:47Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T12:21:32Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T12:21:32Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T12:22:22Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T12:22:22Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T12:23:07Z + Message: Successfully completed reconfigureTLS for solr. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 33m KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/rotate-tls + Normal Starting 33m KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 33m KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: rotate-tls + Warning get certificate; ConditionStatus:True 33m KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 33m KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 33m KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 33m KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 33m KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 33m KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 33m KubeDB Ops-manager Operator Successfully synced all certificates + Warning get certificate; ConditionStatus:True 33m KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 33m KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 33m KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 33m KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 33m KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 33m KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 33m KubeDB Ops-manager Operator Successfully synced all certificates + Normal UpdatePetSets 33m KubeDB Ops-manager Operator successfully reconciled the Solr with tls configuration + Warning get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 33m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning running pod; ConditionStatus:False 32m KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-cluster-data-0 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 32m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 31m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 31m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Normal RestartNodes 30m KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 30m KubeDB 
Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 30m KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: rotate-tls +``` + +Now, let's check the expiration date of the certificate. + +```bash +$ kubectl exec -it -n demo solr-cluster-data-0 -- keytool -list -v -keystore /var/solr/etc/keystore.p12 -storepass Ni5tEgfjahzS53D3 | grep -E 'Valid from|Alias name' +Defaulted container "solr" out of: solr, init-solr (init) +Alias name: 1 +Valid from: Mon Nov 04 12:23:07 UTC 2024 until: Sun Feb 02 12:23:07 UTC 2025 +Valid from: Thu Aug 15 05:59:09 UTC 2024 until: Fri Aug 15 05:59:09 UTC 2025 +``` + +As we can see from the above output, the certificate has been rotated successfully. + +## Change Issuer/ClusterIssuer + +Now, we are going to change the issuer of this database. + +- Let's create a new ca certificate and key using a different subject `CN=ca-update,O=kubedb-updated`. + +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca-updated /O=kubedb-updated" +Generating a RSA private key +..............................................................+++++ +......................................................................................+++++ +writing new private key to './ca.key' +----- +``` + +- Now we are going to create a new ca-secret using the certificate files that we have just generated. + +```bash +$ kubectl create secret tls Solr-new-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +secret/solr-new-ca created +``` + +Now, Let's create a new `Issuer` using the `mongo-new-ca` secret that we have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: sl-new-issuer + namespace: demo +spec: + ca: + secretName: solr-new-ca +``` + +Let's apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure-tls/sl-new-issuer.yaml +issuer.cert-manager.io/sl-new-issuer created +``` + +### Create SolrOpsRequest + +In order to use the new issuer to issue new certificates, we have to create a `SolrOpsRequest` CRO with the newly created issuer. Below is the YAML of the `SolrOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + issuerRef: + name: sl-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our Solr. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Solr/reconfigure-tls/sl-update-issuer.yaml +solrpsrequest.ops.kubedb.com/slops-update-issuer created +``` + +#### Verify Issuer is changed successfully + +Let's wait for `SolrOpsRequest` to be `Successful`. 
Run the following command to watch `SolrOpsRequest` CRO, + +```bash +$ kubectl get solropsrequests -n demo slops-update-issuer +NAME TYPE STATUS AGE +slops-update-issuer ReconfigureTLS Successful 8m6s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe slops -n demo slops-update-issuer +Name: slops-update-issuer +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T13:04:09Z + Generation: 1 + Resource Version: 1553891 + UID: aa1a5101-8daa-4a0e-b640-c6ba8c20a431 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Tls: + Issuer Ref: + API Group: cert-manager.io + Kind: Issuer + Name: sl-new-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T13:04:09Z + Message: Solr ops-request has started to reconfigure tls for solr nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T13:04:22Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2024-11-04T13:04:17Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2024-11-04T13:04:17Z + Message: check ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CheckReadyCondition + Last Transition Time: 2024-11-04T13:04:17Z + Message: issuing condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssuingCondition + Last Transition Time: 2024-11-04T13:04:27Z + Message: successfully reconciled the Solr with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T13:07:02Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T13:04:32Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T13:04:32Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T13:04:37Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T13:05:22Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T13:05:22Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T13:06:12Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T13:06:12Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T13:07:02Z + 
Message: Successfully completed reconfigureTLS for solr. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m52s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-update-issuer + Normal Starting 3m52s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 3m52s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-update-issuer + Warning get certificate; ConditionStatus:True 3m44s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 3m44s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 3m44s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 3m44s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 3m44s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 3m44s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 3m44s KubeDB Ops-manager Operator Successfully synced all certificates + Warning get certificate; ConditionStatus:True 3m39s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 3m39s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 3m39s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 3m39s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning check ready condition; ConditionStatus:True 3m39s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 3m39s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Normal CertificateSynced 3m39s KubeDB Ops-manager Operator Successfully synced all certificates + Normal UpdatePetSets 3m34s KubeDB Ops-manager Operator successfully reconciled the Solr with tls configuration + Warning get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m29s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m29s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning running pod; ConditionStatus:False 3m24s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-cluster-data-0 2m39s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 2m39s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 109s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 109s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; 
PodName:solr-cluster-coordinator-0 + Normal RestartNodes 59s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 59s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 59s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-update-issuer +``` + +Now, Let's exec into a Solr node and find out the ca subject to see if it matches the one we have provided. + +```bash +$ kubectl exec -it -n demo solr-cluster-data-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-cluster-data-0:/opt/solr-9.6.1$ keytool -list -v -keystore /var/solr/etc/keystore.p12 -storepass Ni5tEgfjahzS53D3 | grep 'Issuer' +Issuer: O=kubedb-updated, CN="ca-updated " +Issuer: O=kubedb-updated, CN="ca-updated " + +``` + +We can see from the above output that, the subject name matches the subject name of the new ca certificate that we have created. So, the issuer is changed successfully. + +## Remove TLS from the Database + +Now, we are going to remove TLS from this database using a SolrOpsRequest. + +### Create SolrOpsRequest + +Below is the YAML of the `SolrOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: solr-cluster + tls: + remove: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on Solr. +- `spec.tls.remove` specifies that we want to remove tls from this cluster. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure-tls/remove-tls.yaml +solropsrequest.ops.kubedb.com/slops-remove created +``` + +#### Verify TLS Removed Successfully + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CRO, + +```bash +$ kubectl get solropsrequest -n demo slops-remove +NAME TYPE STATUS AGE +slops-remove ReconfigureTLS Successful 105s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed. 
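While a `ReconfigureTLS` request is being processed, the operator evicts the pods one at a time, so in a separate terminal you can follow the rolling restart; the label selector below is an assumption based on the `app.kubernetes.io/instance` label KubeDB puts on the Solr pods.

```bash
# Watch the solr-cluster pods get recreated one by one while TLS is removed
$ kubectl get pods -n demo -l app.kubernetes.io/instance=solr-cluster -w
```

The describe output below records the same eviction steps as conditions.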
+ +```bash +$ kubectl describe slops -n demo slops-remove +Name: slops-remove +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-04T13:15:15Z + Generation: 1 + Resource Version: 1555016 + UID: a98301fe-af47-4554-9de9-bf6be3041dc3 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Tls: + Remove: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2024-11-04T13:15:15Z + Message: Solr ops-request has started to reconfigure tls for solr nodes + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2024-11-04T13:15:23Z + Message: successfully reconciled the Solr with tls configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-04T13:17:58Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-04T13:15:28Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T13:15:28Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-04T13:15:33Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-04T13:16:13Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T13:16:13Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-11-04T13:17:08Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T13:17:08Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-04T13:17:58Z + Message: Successfully completed reconfigureTLS for solr. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 6m3s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-remove + Normal Starting 6m3s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 6m3s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-remove + Normal UpdatePetSets 5m55s KubeDB Ops-manager Operator successfully reconciled the Solr with tls configuration + Warning get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 5m50s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 5m50s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning running pod; ConditionStatus:False 5m45s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-cluster-data-0 5m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 5m5s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 4m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 4m10s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Normal RestartNodes 3m20s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 3m20s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 3m20s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-remove +``` + +Now, Let's exec into one of the broker node and find out that TLS is disabled or not. + +```bash +$ kubectl exec -it -n demo solr-cluster-data-0 -- env | grep -i ssl +Defaulted container "solr" out of: solr, init-solr (init) +``` + +So, we can see from the above that, output that tls is disabled successfully. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete opsrequest slops-add-tls slops-remove slops-rotate slops-update-issuer +kubectl delete solr -n demo solr-cluster +kubectl delete issuer -n demo sl-issuer sl-new-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md) +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
+ diff --git a/docs/guides/solr/reconfigure/_index.md b/docs/guides/solr/reconfigure/_index.md new file mode 100644 index 0000000000..3b3022e05e --- /dev/null +++ b/docs/guides/solr/reconfigure/_index.md @@ -0,0 +1,10 @@ +--- +title: Solr Reconfigure +menu: + docs_{{ .version }}: + identifier: sl-reconfigure + name: Reconfigure + parent: sl-solr-guides + weight: 30 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/reconfigure/overview.md b/docs/guides/solr/reconfigure/overview.md new file mode 100644 index 0000000000..e25ec94f82 --- /dev/null +++ b/docs/guides/solr/reconfigure/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring Solr +menu: + docs_{{ .version }}: + identifier: sl-reconfigure-overview + name: Overview + parent: sl-reconfigure + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring Solr + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures `Solr` components such as Combined, Broker, Controller, etc. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + +## How Reconfiguring Solr Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures `Solr` components. Open the image in a new tab to see the enlarged version. + +
+  Reconfiguring process of Solr +
Fig: Reconfiguring process of Solr
+
+ +The Reconfiguring Solr process consists of the following steps: + +1. At first, a user creates a `Solr` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Solr` CR. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure the various components (ie. Combined, Broker) of the `Solr`, the user creates a `SolrOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR. + +6. When it finds a `SolrOpsRequest` CR, it halts the `Solr` object which is referred from the `SolrOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Solr` object during the reconfiguring process. + +7. Then the `KubeDB` Ops-manager operator will replace the existing configuration with the new configuration provided or merge the new configuration with the existing configuration according to the `MogoDBOpsRequest` CR. + +8. Then the `KubeDB` Ops-manager operator will restart the related PetSet Pods so that they restart with the new configuration defined in the `SolrOpsRequest` CR. + +9. After the successful reconfiguring of the `Solr` components, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on reconfiguring Solr components using `SolrOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/solr/reconfigure/solr.md b/docs/guides/solr/reconfigure/solr.md new file mode 100644 index 0000000000..12513a66ae --- /dev/null +++ b/docs/guides/solr/reconfigure/solr.md @@ -0,0 +1,614 @@ +--- +title: Reconfigure Solr +menu: + docs_{{ .version }}: + identifier: sl-reconfigure-solr + name: Reconfigure OpsRequest + parent: sl-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Solr Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Solr cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Combined](/docs/guides/solr/clustering/combined_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Reconfigure Overview](/docs/guides/solr/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/Solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Solr` cluster using a supported version by `KubeDB` operator. Then we are going to apply `SolrOpsRequest` to reconfigure its configuration. + +### Prepare Solr Cluster + +Now, we are going to deploy a `Solr` cluster with version `9.6.1`. + +### Deploy Solr + +At first, we will create a secret with the `solr.xml` attribute containing required configuration settings. 
+ +**server.properties:** + +```properties +${solr.max.booleanClauses:2024} +``` +Here, `maxBooleanClauses` is set to `2024`, whereas the default value is `1024`. + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure/sl-custom-config.yaml +secret/sl-custom-config created +``` + +In this section, we are going to create a Solr object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr + namespace: demo +spec: + configSecret: + name: sl-custom-config + version: 9.6.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Solr/reconfigure/solr.yaml +solr.kubedb.com/solr created +``` + +Now, wait until `solr` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr kubedb.com/v1alpha2 9.6.1 Ready 10m +``` + +Now, we will check if the Solr has started with the custom configuration we have provided. + +Exec into the Solr pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo solr-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-0:/opt/solr-9.6.1$ cat /var/solr/solr.xml + + + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + /var/solr/data + ${solr.sharedLib:},/opt/solr/contrib/gcs-repository/lib,/opt/solr/contrib/prometheus-exporter/lib,/opt/solr/contrib/s3-repository/lib,/opt/solr/dist + ${solr.allowPaths:} + ${solr.max.booleanClauses:2024} + + ${connTimeout:60000} + ${socketTimeout:600000} + + + ${distribUpdateConnTimeout:60000} + ${distribUpdateSoTimeout:600000} + ${genericCoreNodeNames:true} + ${host:} + ${hostContext:solr} + ${solr.port.advertise:80} + ${zkACLProvider:org.apache.solr.common.cloud.DigestZkACLProvider} + ${zkClientTimeout:30000} + ${zkCredentialsInjector:org.apache.solr.common.cloud.VMParamsZkCredentialsInjector} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DigestZkCredentialsProvider} + + + + +``` +Here, we can see that our given configuration is applied to the Solr cluster. `maxBooleanClauses` is set to `2024`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `maxBooleanClauses` to `2030`. + +Now, update our `solr.xml` file with the new configuration. + +**server.properties:** + +```properties +${solr.max.booleanClauses:2030} +``` + +Then, we will create a new secret with this configuration file. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-sl-custom-config + namespace: demo +stringData: + "solr.xml": | + + ${solr.max.booleanClauses:2030} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure/new-sl-custom-config.yaml +secret/new-sl-custom-config created +``` + +#### Create SolrOpsRequest + +Now, we will use this secret to replace the previous secret using a `SolrOpsRequest` CR. The `SolrOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-reconfigure-custom-config + namespace: demo +spec: + apply: IfReady + configuration: + configSecret: + name: new-sl-custom-config + databaseRef: + name: solr + type: Reconfigure +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `Solr-dev` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configSecret.name` specifies the name of the new secret. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml +solropsrequest.ops.kubedb.com/sl-reconfigure-custom-config created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Solr` object. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +sl-reconfigure-custom-config Reconfigure Successful 5m24s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
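Since this ops request swaps the referenced secret, another quick sanity check is to read the `configSecret` name back from the `Solr` object; the output shown is what we expect once the request has succeeded.

```bash
$ kubectl get sl -n demo solr -o jsonpath='{.spec.configSecret.name}{"\n"}'
new-sl-custom-config
```

The full describe output below shows the steps the operator followed.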
+ +```bash +$ kubectl describe slops -n demo sl-reconfigure-custom-config +Name: sl-reconfigure-custom-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-05T12:59:25Z + Generation: 1 + Resource Version: 1665913 + UID: 7bb29ead-8322-4ac3-9375-6dd8594882d1 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-sl-custom-config + Database Ref: + Name: solr + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-05T12:59:25Z + Message: Solr ops-request has started to reconfigure Solr nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-05T12:59:33Z + Message: successfully reconciled the Solr with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-05T13:01:39Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-05T12:59:38Z + Message: get pod; ConditionStatus:True; PodName:solr-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-0 + Last Transition Time: 2024-11-05T12:59:38Z + Message: evict pod; ConditionStatus:True; PodName:solr-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-0 + Last Transition Time: 2024-11-05T12:59:43Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-05T13:00:38Z + Message: get pod; ConditionStatus:True; PodName:solr-1 + Observed Generation: 1 + Status: True + Type: GetPod--solr-1 + Last Transition Time: 2024-11-05T13:00:38Z + Message: evict pod; ConditionStatus:True; PodName:solr-1 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-1 + Last Transition Time: 2024-11-05T13:01:39Z + Message: Successfully completed reconfigure Solr + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 5m45s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/sl-reconfigure-custom-config + Normal Starting 5m45s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr + Normal Successful 5m45s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr for SolrOpsRequest: sl-reconfigure-custom-config + Normal UpdatePetSets 5m37s KubeDB Ops-manager Operator successfully reconciled the Solr with new configure + Warning get pod; ConditionStatus:True; PodName:solr-0 5m32s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-0 + Warning evict pod; ConditionStatus:True; PodName:solr-0 5m32s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-0 + Warning running pod; ConditionStatus:False 5m27s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-1 4m32s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-1 + Warning evict pod; ConditionStatus:True; PodName:solr-1 4m32s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-1 + Normal RestartNodes 3m31s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 3m31s KubeDB Ops-manager Operator Resuming Solr database: demo/solr + Normal Successful 3m31s KubeDB Ops-manager Operator Successfully 
resumed Solr database: demo/solr for SolrOpsRequest: sl-reconfigure-custom-config + Normal RestartNodes 3m31s KubeDB Ops-manager Operator Successfully restarted all nodes +``` + +Now let's exec one of the instance and cat solr.xml file to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo solr-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-0:/opt/solr-9.6.1$ cat /var/solr/solr.xml + + + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + /var/solr/data + ${solr.sharedLib:},/opt/solr/contrib/gcs-repository/lib,/opt/solr/contrib/prometheus-exporter/lib,/opt/solr/contrib/s3-repository/lib,/opt/solr/dist + ${solr.allowPaths:} + ${solr.max.booleanClauses:2030} + + ${connTimeout:60000} + ${socketTimeout:600000} + + + ${distribUpdateConnTimeout:60000} + ${distribUpdateSoTimeout:600000} + ${genericCoreNodeNames:true} + ${host:} + ${hostContext:solr} + ${solr.port.advertise:80} + ${zkACLProvider:org.apache.solr.common.cloud.DigestZkACLProvider} + ${zkClientTimeout:30000} + ${zkCredentialsInjector:org.apache.solr.common.cloud.VMParamsZkCredentialsInjector} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DigestZkCredentialsProvider} + + + +``` + +As we can see from the configuration of ready Solr, the value of `log.retention.hours` has been changed from `2024` to `2030`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `maxBooleanClauses` to `2024`. This time we won't use a new secret. We will use the `applyConfig` field of the `SolrOpsRequest`. This will merge the new config in the existing secret. + +#### Create SolrOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `SolrOpsRequest` CR. The `SolrOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-reconfigure-apply-config + namespace: demo +spec: + apply: IfReady + configuration: + applyConfig: + solr.xml: | + + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + + databaseRef: + name: solr + type: Reconfigure +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `solr` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on Solr. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/reconfigure/sl-reconfigure-apply-config.yaml +Solropsrequest.ops.kubedb.com/sl-reconfigure-apply-config created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +sl-reconfigure-custom-config Reconfigure Successful 2m22s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. 
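Instead of polling, you can also stream the status changes with `--watch`; the intermediate `Progressing` status shown below is indicative and the exact timings will differ.

```bash
$ kubectl get slops -n demo sl-reconfigure-apply-config -w
NAME                          TYPE          STATUS        AGE
sl-reconfigure-apply-config   Reconfigure   Progressing   8s
sl-reconfigure-apply-config   Reconfigure   Successful    2m1s
```

The describe output below shows how the operator merged the supplied snippet into the existing configuration and restarted the nodes.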
+ +```bash +$ kubectl describe slops -n demo sl-reconfigure-custom-config +Name: sl-reconfigure-custom-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-05T13:09:55Z + Generation: 1 + Resource Version: 1666897 + UID: 3fd6a300-5ed2-4c0d-b1fd-9102a44b37ce +Spec: + Apply: IfReady + Configuration: + Apply Config: + solr.xml: + ${solr.max.booleanClauses:2024} + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + + + Database Ref: + Name: solr + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-05T13:09:55Z + Message: Solr ops-request has started to reconfigure Solr nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-05T13:09:59Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-11-05T13:10:04Z + Message: successfully reconciled the Solr with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-05T13:11:55Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-11-05T13:10:09Z + Message: get pod; ConditionStatus:True; PodName:solr-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-0 + Last Transition Time: 2024-11-05T13:10:09Z + Message: evict pod; ConditionStatus:True; PodName:solr-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-0 + Last Transition Time: 2024-11-05T13:10:14Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-05T13:10:54Z + Message: get pod; ConditionStatus:True; PodName:solr-1 + Observed Generation: 1 + Status: True + Type: GetPod--solr-1 + Last Transition Time: 2024-11-05T13:10:54Z + Message: evict pod; ConditionStatus:True; PodName:solr-1 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-1 + Last Transition Time: 2024-11-05T13:11:55Z + Message: Successfully completed reconfigure Solr + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m52s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/sl-reconfigure-custom-config + Normal Starting 2m51s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr + Normal Successful 2m51s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr for SolrOpsRequest: sl-reconfigure-custom-config + Normal UpdatePetSets 2m43s KubeDB Ops-manager Operator successfully reconciled the Solr with new configure + Warning get pod; ConditionStatus:True; PodName:solr-0 2m38s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-0 + Warning evict pod; ConditionStatus:True; PodName:solr-0 2m38s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-0 + Warning running pod; ConditionStatus:False 2m33s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-1 113s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-1 + Warning evict pod; ConditionStatus:True; PodName:solr-1 113s KubeDB Ops-manager 
Operator evict pod; ConditionStatus:True; PodName:solr-1 + Normal RestartNodes 52s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 52s KubeDB Ops-manager Operator Resuming Solr database: demo/solr + Normal Successful 52s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr for SolrOpsRequest: sl-reconfigure-custom-config +``` + +Now let's exec into one of the instance and cat `solr.xml` file to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo solr-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-0:/opt/solr-9.6.1$ cat /var/solr/solr.xml + + + + + solrbackup + us-east-1 + http://s3proxy-s3.demo.svc:80 + + + /var/solr/data + ${solr.sharedLib:},/opt/solr/contrib/gcs-repository/lib,/opt/solr/contrib/prometheus-exporter/lib,/opt/solr/contrib/s3-repository/lib,/opt/solr/dist + ${solr.allowPaths:} + ${solr.max.booleanClauses:2024} + + ${connTimeout:60000} + ${socketTimeout:600000} + + + ${distribUpdateConnTimeout:60000} + ${distribUpdateSoTimeout:600000} + ${genericCoreNodeNames:true} + ${host:} + ${hostContext:solr} + ${solr.port.advertise:80} + ${zkACLProvider:org.apache.solr.common.cloud.DigestZkACLProvider} + ${zkClientTimeout:30000} + ${zkCredentialsInjector:org.apache.solr.common.cloud.VMParamsZkCredentialsInjector} + ${zkCredentialsProvider:org.apache.solr.common.cloud.DigestZkCredentialsProvider} + + + +``` + +As we can see from the configuration of ready Solr, the value of `maxBooleanClauses` has been changed from `2030` to `2024`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete sl -n demo solr +kubectl delete solropsrequest -n demo sl-reconfigure-custom-config sl-reconfigure-apply-config +kubectl delete secret -n demo sl-custom-config new-sl-custom-config +kubectl delete namespace demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/restart/_index.md b/docs/guides/solr/restart/_index.md new file mode 100644 index 0000000000..d9998edbc6 --- /dev/null +++ b/docs/guides/solr/restart/_index.md @@ -0,0 +1,10 @@ +--- +title: Solr Restart +menu: + docs_{{ .version }}: + identifier: sl-restart + name: Restart + parent: sl-solr-guides + weight: 34 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/restart/restart.md b/docs/guides/solr/restart/restart.md new file mode 100644 index 0000000000..d8916ea365 --- /dev/null +++ b/docs/guides/solr/restart/restart.md @@ -0,0 +1,219 @@ +--- +title: Restart Solr +menu: + docs_{{ .version }}: + identifier: sl-restart-details + name: Restart Solr + parent: sl-restart + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Restart Solr + +KubeDB supports restarting the Solr database via a SolrOpsRequest. 
Restarting is useful if some pods are got stuck in some phase, or they are not working correctly. This tutorial will show you how to use that. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/solr) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Deploy Solr + +In this section, we are going to deploy a Solr database using KubeDB. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.6.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +- `spec.topology` represents the specification for Solr topology. + - `data` denotes the data node of solr topology. + - `overseer` denotes the controller node of solr topology. + - `coordinator` denotes the controller node of solr topology + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Sslr/restart/solr-cluster.yaml +solr.kubedb.com/solr-cluster created +``` + +## Apply Restart opsRequest + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: restart + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: Restart +``` + +- `spec.type` specifies the Type of the ops Request +- `spec.databaseRef` holds the name of the Solr CR. It should be available in the same namespace as the opsRequest +- The meaning of `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/solr/concepts/solropsrequests.md#spectimeout) + +> Note: The method of restarting the combined node is exactly same as above. All you need, is to specify the corresponding Solr name in `spec.databaseRef.name` section. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/restart/ops.yaml +solropsrequest.ops.kubedb.com/restart created +``` + +Now the Ops-manager operator will first restart the controller pods, then broker of the referenced Solr. 
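+
+While the restart is in progress, you can watch the pods being evicted and recreated one by one. The label selector below is an assumption based on KubeDB's usual labelling; adjust it to whatever labels your Solr pods actually carry.
+
+```bash
+# Watch the Solr pods restart; press Ctrl+C to stop watching.
+kubectl get pods -n demo -l app.kubernetes.io/instance=solr-cluster --watch
+```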
+ +```shell +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +restart Restart Successful 2m34s +```` + +```bash +$ kubectl get slops -n demo restart -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"SolrOpsRequest","metadata":{"annotations":{},"name":"restart","namespace":"demo"},"spec":{"apply":"IfReady","databaseRef":{"name":"solr-cluster"},"type":"Restart"}} + creationTimestamp: "2024-11-06T06:11:55Z" + generation: 1 + name: restart + namespace: demo + resourceVersion: "1746799" + uid: de3f03f9-512e-44d2-b29f-6c084c5d993b +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: Restart +status: + conditions: + - lastTransitionTime: "2024-11-06T06:11:55Z" + message: Solr ops-request has started to restart solr nodes + observedGeneration: 1 + reason: Restart + status: "True" + type: Restart + - lastTransitionTime: "2024-11-06T06:14:23Z" + message: Successfully Restarted Solr nodes + observedGeneration: 1 + reason: RestartNodes + status: "True" + type: RestartNodes + - lastTransitionTime: "2024-11-06T06:12:03Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-overseer-0 + - lastTransitionTime: "2024-11-06T06:12:03Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-overseer-0 + - lastTransitionTime: "2024-11-06T06:12:08Z" + message: running pod; ConditionStatus:False + observedGeneration: 1 + status: "False" + type: RunningPod + - lastTransitionTime: "2024-11-06T06:12:48Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-data-0 + - lastTransitionTime: "2024-11-06T06:12:48Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-data-0 + - lastTransitionTime: "2024-11-06T06:13:38Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-coordinator-0 + - lastTransitionTime: "2024-11-06T06:13:38Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-coordinator-0 + - lastTransitionTime: "2024-11-06T06:14:23Z" + message: Controller has successfully restart the Solr replicas + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solropsrequest -n demo restart +kubectl delete solr -n demo solr-cluster +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/solr/scaling/_index.md b/docs/guides/solr/scaling/_index.md new file mode 100644 index 0000000000..13b18cb4b1 --- /dev/null +++ b/docs/guides/solr/scaling/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Scaling +menu: + docs_{{ .version }}: + identifier: sl-scaling + name: Scaling + parent: sl-solr-guides + weight: 22 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/scaling/horizontal-scaling/_index.md b/docs/guides/solr/scaling/horizontal-scaling/_index.md new file mode 100644 index 0000000000..7a4aa2d2a3 --- /dev/null +++ b/docs/guides/solr/scaling/horizontal-scaling/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: sl-scaling-horizontal + name: Horizontal Scaling + parent: sl-scaling + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/scaling/horizontal-scaling/combined.md b/docs/guides/solr/scaling/horizontal-scaling/combined.md new file mode 100644 index 0000000000..f59779f6b5 --- /dev/null +++ b/docs/guides/solr/scaling/horizontal-scaling/combined.md @@ -0,0 +1,405 @@ +--- +title: Solr Combined Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: sl-scaling-horizontal-combined + name: Combined Cluster + parent: sl-scaling-horizontal + weight: 50 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Solr Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Solr combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Combined](/docs/guides/solr/clustering/combined_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Horizontal Scaling Overview](/docs/guides/solr/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on Combined Cluster + +Here, we are going to deploy a `Solr` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Solr Combined cluster + +Now, we are going to deploy a `Solr` combined cluster with version `9.4.1`. + +### Deploy Solr combined cluster + +In this section, we are going to deploy a Solr combined cluster. Then, in the next section we will scale the cluster using `SolrOpsRequest` CRD. 
Below is the YAML of the `Solr` CR that we are going to create, + +```bash +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/horizontal/combined/solr.yaml +solr.kubedb.com/solr-combined created +``` + +Now, wait until `solr-combined` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.4.1 Ready 65m +``` + +Let's check the number of replicas has from Solr object, number of pods the petset have, + +```bash +$ kubectl get solr -n demo solr-combined -o json | jq '.spec.replicas' +2 +$ kubectl get petset -n demo solr-combined -o json | jq '.spec.replicas' +2 + +``` + +We can see from both command that the cluster has 2 replicas. + +Also, we can verify the replicas of the combined from an internal solr command by exec into a replica. + +We can see from the above output that the Solr has 2 nodes. + +We are now ready to apply the `SolrOpsRequest` CR to scale this cluster. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the combined cluster to meet the desired number of replicas after scaling. + +#### Create SolrOpsRequest + +In order to scale up the replicas of the combined cluster, we have to create a `SolrOpsRequest` CR with our desired replicas. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-up-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + type: HorizontalScaling + horizontalScaling: + node: 4 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `Solr-dev` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Solr. +- `spec.horizontalScaling.node` specifies the desired replicas after scaling. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/horizontal/combined/scaling.yaml +Solropsrequest.ops.kubedb.com/kfops-hscale-up-combined created +``` + +#### Verify Combined cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ watch kubectl get Solropsrequest -n demo +NAME TYPE STATUS AGE +slops-hscale-up-combined HorizontalScaling Successful 106s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
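+
+The `IsNodeInCluster` conditions in the describe output below show the operator waiting for each new pod to join the SolrCloud cluster. You can check the live nodes yourself through the Collections API; the admin secret name, the `admin` username, and the use of `curl` inside the pod are assumptions, so adjust them to your setup.
+
+```bash
+# Read the admin password (assumed secret name/key) and list the live nodes reported by SolrCloud.
+PASSWORD=$(kubectl get secret -n demo solr-combined-admin-cred -o jsonpath='{.data.password}' | base64 -d)
+kubectl exec -n demo solr-combined-0 -- \
+  curl -s -u "admin:${PASSWORD}" "http://localhost:8983/solr/admin/collections?action=CLUSTERSTATUS" \
+  | grep -o '"live_nodes":\[[^]]*\]'
+```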
+ +```bash +$ kubectl describe slops -n demo slops-hscale-up-combined +Name: slops-hscale-up-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T09:54:54Z + Generation: 1 + Resource Version: 1883245 + UID: dfc22d44-6638-43f7-97b7-9846658cc061 +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Horizontal Scaling: + Node: 4 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-07T09:54:54Z + Message: Solr ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-11-07T09:55:52Z + Message: ScaleUp solr-combined nodes + Observed Generation: 1 + Reason: HorizontalScaleCombinedNode + Status: True + Type: HorizontalScaleCombinedNode + Last Transition Time: 2024-11-07T09:55:02Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2024-11-07T09:55:48Z + Message: is node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsNodeInCluster + Last Transition Time: 2024-11-07T09:55:52Z + Message: Successfully completed horizontally scale Solr cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 89s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-hscale-up-combined + Normal Starting 89s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-combined + Normal Successful 89s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-combined for SolrOpsRequest: slops-hscale-up-combined + Warning patch pet set; ConditionStatus:True 81s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 76s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 60s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Warning patch pet set; ConditionStatus:True 56s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 51s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 35s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleCombinedNode 31s KubeDB Ops-manager Operator ScaleUp solr-combined nodes + Normal Starting 31s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-combined + Normal Successful 31s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-combined for SolrOpsRequest: slops-hscale-up-combined +``` + +Now, we are going to verify the number of replicas this cluster has from the Solr object, number of pods the petset have, + +```bash +$ kubectl get solr -n demo solr-combined -o json | jq '.spec.replicas' +4 +$ kubectl get petset -n demo solr-combined -o json | jq '.spec.replicas' +4 +``` + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the Solr combined cluster to meet the desired number of replicas after scaling. 
+ +#### Create SolrOpsRequest + +In order to scale down the replicas of the Solr combined cluster, we have to create a `SolrOpsRequest` CR with our desired replicas. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-down-combined + namespace: demo +spec: + databaseRef: + name: solr-combined + type: HorizontalScaling + horizontalScaling: + node: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `Solr-dev` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Solr. +- `spec.horizontalScaling.node` specifies the desired replicas after scaling. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/horizontal-scaling/solr-hscale-down-combined.yaml +solropsrequest.ops.kubedb.com/slops-hscale-down-combined created +``` + +#### Verify Combined cluster replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ watch kubectl get Solropsrequest -n demo +NAME TYPE STATUS AGE +slops-hscale-down-combined HorizontalScaling Successful 2m32s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe slops -n demo slops-hscale-down-combined +Name: slops-hscale-down-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T09:59:14Z + Generation: 1 + Resource Version: 1883926 + UID: 6f028e60-ed6f-4716-920d-eb348f9bee80 +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Horizontal Scaling: + Node: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-07T09:59:14Z + Message: Solr ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-11-07T10:01:42Z + Message: ScaleDown solr-combined nodes + Observed Generation: 1 + Reason: HorizontalScaleCombinedNode + Status: True + Type: HorizontalScaleCombinedNode + Last Transition Time: 2024-11-07T09:59:22Z + Message: reassign partitions; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReassignPartitions + Last Transition Time: 2024-11-07T09:59:22Z + Message: is pet set patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetPatched + Last Transition Time: 2024-11-07T10:01:37Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-11-07T10:00:27Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2024-11-07T10:01:37Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-11-07T10:01:42Z + Message: Successfully completed horizontally scale Solr cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: 
Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 8m37s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-hscale-down-combined + Normal Starting 8m37s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-combined + Normal Successful 8m37s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-combined for SolrOpsRequest: slops-hscale-down-combined + Warning reassign partitions; ConditionStatus:True 8m29s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 8m29s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:False 8m24s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 7m24s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 7m24s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 7m24s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 7m24s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 7m24s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m24s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning reassign partitions; ConditionStatus:True 7m19s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 7m19s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:False 7m14s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 6m14s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 6m14s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 6m14s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 6m14s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 6m14s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m14s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal HorizontalScaleCombinedNode 6m9s KubeDB Ops-manager Operator ScaleDown solr-combined nodes + Normal Starting 6m9s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-combined + Normal Successful 6m9s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-combined for SolrOpsRequest: slops-hscale-down-combined +``` + +Now, we are going to verify the number of replicas this cluster has from the Solr object, number of pods the petset have, + +```bash +$ kubectl get solr -n demo solr-combined -o json | jq '.spec.replicas' +2 +$ kubectl get petset -n demo solr-combined -o json | jq '.spec.replicas' +2 +``` + +From all the above outputs we can see that the replicas of the combined cluster is `2`. That means we have successfully scaled down the replicas of the Solr combined cluster. 
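+
+The `delete pvc` conditions in the events above indicate that the operator also removes the PersistentVolumeClaims of the pods it scales away. If you want to confirm nothing was left behind, listing the remaining claims is enough; the label selector is an assumption, so adjust it if your release uses different labels.
+
+```bash
+# Only the claims for the two remaining solr-combined pods should be listed.
+kubectl get pvc -n demo -l app.kubernetes.io/instance=solr-combined
+```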
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete sl -n demo solr-cluster +kubectl delete solropsrequest -n demo slops-hscale-up-topology slops-hscale-down-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/scaling/horizontal-scaling/overview.md b/docs/guides/solr/scaling/horizontal-scaling/overview.md new file mode 100644 index 0000000000..386c517e8f --- /dev/null +++ b/docs/guides/solr/scaling/horizontal-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: Solr Horizontal Scaling Overview +menu: + docs_{{ .version }}: + identifier: sl-scaling-horizontal-overview + name: Overview + parent: sl-scaling-horizontal + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Horizontal Scaling + +This guide will give an overview on how KubeDB Ops-manager operator scales up or down `Solr` cluster replicas of various component such as Combined, Broker, Controller. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + +## How Horizontal Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator scales up or down `Solr` database components. Open the image in a new tab to see the enlarged version. + +
+  [Figure: Horizontal scaling process of Solr]
+ +The Horizontal scaling process consists of the following steps: + +1. At first, a user creates a `Solr` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Solr` CR. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to scale the various components of the `Solr` cluster, the user creates a `SolrOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR. + +6. When it finds a `SolrOpsRequest` CR, it halts the `Solr` object which is referred from the `SolrOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Solr` object during the horizontal scaling process. + +7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `SolrOpsRequest` CR. + +8. After the successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `Solr` object to reflect the updated state. + +9. After the successful scaling of the `Solr` replicas, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on horizontal scaling of Solr cluster using `SolrOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/solr/scaling/horizontal-scaling/topology.md b/docs/guides/solr/scaling/horizontal-scaling/topology.md new file mode 100644 index 0000000000..4afe88862f --- /dev/null +++ b/docs/guides/solr/scaling/horizontal-scaling/topology.md @@ -0,0 +1,507 @@ +--- +title: Solr Topology Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: sl-scaling-horizontal-topology + name: Topology Cluster + parent: sl-scaling-horizontal + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Solr Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Solr topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Topology](/docs/guides/solr/clustering/topology_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Horizontal Scaling Overview](/docs/guides/solr/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on Topology Cluster + +Here, we are going to deploy a `Solr` topology cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. 
+ +### Prepare Solr Topology cluster + +Now, we are going to deploy a `Solr` topology cluster with version `9.4.1`. + +### Deploy Solr topology cluster + +In this section, we are going to deploy a Solr topology cluster. Then, in the next section we will scale the cluster using `SolrOpsRequest` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/horizontal/topology/solr.yaml +solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 90m +``` + +Let's check the number of replicas has from Solr object, number of pods the petset have, + +**Data Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.data.replicas' +1 +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.replicas' +1 +``` + +**Overseer Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.overseer.replicas' +1 +$ kubectl get petset -n demo solr-cluster-overseer -o json | jq '.spec.replicas' +1 + +``` + +**Coordinator Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.coordinator.replicas' +1 +$ kubectl get petset -n demo solr-cluster-coordinator -o json | jq '.spec.replicas' +1 + +``` + +We can see from commands that the cluster has 3 replicas for data, overseer, coordinator. + + + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the topology cluster to meet the desired number of replicas after scaling. + +#### Create SolrOpsRequest + +In order to scale up the replicas of the topology cluster, we have to create a `SolrOpsRequest` CR with our desired replicas. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-up-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: HorizontalScaling + horizontalScaling: + data: 2 + overseer: 2 + coordinator: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `Solr-prod` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Solr. +- `spec.horizontalScaling.topology.data` specifies the desired replicas after scaling for data. +- `spec.horizontalScaling.topology.overseer` specifies the desired replicas after scaling for overseer. +- `spec.horizontalScaling.topology.coordinator` specifies the desired replicas after scaling for coordinator. 
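+
+Before applying the request, it can be handy to snapshot the current replica counts of all three roles in one go, so you can compare them after the scaling completes. This is just a small jq convenience, not part of the original guide.
+
+```bash
+# Prints the current data/overseer/coordinator replica counts from the Solr CR.
+kubectl get solr -n demo solr-cluster -o json \
+  | jq '{data: .spec.topology.data.replicas, overseer: .spec.topology.overseer.replicas, coordinator: .spec.topology.coordinator.replicas}'
+```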
+ + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/horizontal/topology/slops-hscale-up-topology.yaml +solropsrequest.ops.kubedb.com/slops-hscale-up-topology created +``` + +> **Note:** If you want to scale down only broker or controller, you can specify the desired replicas for only broker or controller in the `SolrOpsRequest` CR. You can specify one at a time. If you want to scale broker only, no node will need restart to apply the changes. But if you want to scale controller, all nodes will need restart to apply the changes. + +#### Verify Topology cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ watch kubectl get Solropsrequest -n demo +NAME TYPE STATUS AGE +slops-hscale-up-topology HorizontalScaling Successful 106s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe slops -n demo slops-hscale-up-topology +Name: slops-hscale-up-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T07:40:35Z + Generation: 1 + Resource Version: 1870552 + UID: 142fb5b9-26ec-4dab-ad39-ebfd4470b1db +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Horizontal Scaling: + Data: 2 + Overseer: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-07T07:40:35Z + Message: Solr ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-11-07T07:41:08Z + Message: ScaleUp solr-cluster-data nodes + Observed Generation: 1 + Reason: HorizontalScaleDataNode + Status: True + Type: HorizontalScaleDataNode + Last Transition Time: 2024-11-07T07:40:43Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2024-11-07T07:41:38Z + Message: is node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsNodeInCluster + Last Transition Time: 2024-11-07T07:41:43Z + Message: ScaleUp solr-cluster-overseer nodes + Observed Generation: 1 + Reason: HorizontalScaleOverseerNode + Status: True + Type: HorizontalScaleOverseerNode + Last Transition Time: 2024-11-07T07:41:43Z + Message: Successfully completed horizontally scale Solr cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 7m24s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-hscale-up-topology + Normal Starting 7m24s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 7m24s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-hscale-up-topology + Warning patch pet set; ConditionStatus:True 7m16s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; 
ConditionStatus:False 7m11s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 6m54s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleDataNode 6m51s KubeDB Ops-manager Operator ScaleUp solr-cluster-data nodes + Warning patch pet set; ConditionStatus:True 6m46s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 6m41s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 6m21s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleOverseerNode 6m16s KubeDB Ops-manager Operator ScaleUp solr-cluster-overseer nodes + Normal Starting 6m16s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 6m16s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-hscale-up-topolog +``` + +Now, we are going to verify the number of replicas this cluster has from the Solr object, number of pods the petset have, + +**Broker Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.data.replicas' +2 +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.replicas' +2 +``` + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.overseer.replicas' +2 +$ kubectl get petset -n demo solr-cluster-overseer -o json | jq '.spec.replicas' +2 +``` + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.coordinator.replicas' +2 +$ kubectl get petset -n demo solr-cluster-coordinator -o json | jq '.spec.replicas' +2 +``` + +From all the above outputs we can see that all data, overseer, coordinator of the topology Solr is `2`. That means we have successfully scaled up the replicas of the Solr topology cluster. + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the Solr topology cluster to meet the desired number of replicas after scaling. + +#### Create SolrOpsRequest + +In order to scale down the replicas of the Solr topology cluster, we have to create a `SolrOpsRequest` CR with our desired replicas. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-hscale-down-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: HorizontalScaling + horizontalScaling: + data: 1 + overseer: 1 + coordinator: 1 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Solr. +- `spec.horizontalScaling.topology.data` specifies the desired replicas after scaling for data. +- `spec.horizontalScaling.topology.overseer` specifies the desired replicas after scaling for overseer. +- `spec.horizontalScaling.topology.coordinator` specifies the desired replicas after scaling for coordinator. 
+ +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Solr/scaling/horizontal-scaling/Solr-hscale-down-topology.yaml +solropsrequest.ops.kubedb.com/slops-hscale-down-topology created +``` + +#### Verify Topology cluster replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ watch kubectl get solropsrequest -n demo +NAME TYPE STATUS AGE +slops-hscale-down-topology HorizontalScaling Successful 2m32s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe slops -n demo slops-hscale-down-topology +Name: slops-hscale-down-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T07:54:53Z + Generation: 1 + Resource Version: 1872016 + UID: 67c6912b-0658-43ed-af65-8cf6b249c567 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Horizontal Scaling: + Data: 1 + Overseer: 1 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-07T07:54:53Z + Message: Solr ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-11-07T07:56:11Z + Message: ScaleDown solr-cluster-data nodes + Observed Generation: 1 + Reason: HorizontalScaleDataNode + Status: True + Type: HorizontalScaleDataNode + Last Transition Time: 2024-11-07T07:55:01Z + Message: reassign partitions; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReassignPartitions + Last Transition Time: 2024-11-07T07:55:01Z + Message: is pet set patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetPatched + Last Transition Time: 2024-11-07T07:57:21Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-11-07T07:56:06Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2024-11-07T07:57:21Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-11-07T07:57:26Z + Message: ScaleDown solr-cluster-overseer nodes + Observed Generation: 1 + Reason: HorizontalScaleOverseerNode + Status: True + Type: HorizontalScaleOverseerNode + Last Transition Time: 2024-11-07T07:57:26Z + Message: Successfully completed horizontally scale Solr cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m46s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-hscale-down-topology + Normal Starting 2m46s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 2m46s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: slops-hscale-down-topology + Warning reassign partitions; ConditionStatus:True 
2m38s KubeDB Ops-manager Operator reassign partitions; ConditionStatus:True + Warning is pet set patched; ConditionStatus:True 2m38s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:False 2m33s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 93s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 93s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 93s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 93s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 93s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 93s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal HorizontalScaleDataNode 88s KubeDB Ops-manager Operator ScaleDown solr-cluster-data nodes + Warning is pet set patched; ConditionStatus:True 83s KubeDB Ops-manager Operator is pet set patched; ConditionStatus:True + Warning get pod; ConditionStatus:False 78s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 18s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 18s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 18s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 18s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 18s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 18s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Normal HorizontalScaleOverseerNode 13s KubeDB Ops-manager Operator ScaleDown solr-cluster-overseer nodes + Normal Starting 13s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 13s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-hscale-down-topology +``` + +Now, we are going to verify the number of replicas this cluster has from the Solr object, number of pods the petset have, + +Let's check the number of replicas has from Solr object, number of pods the petset have, + +**Data Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.data.replicas' +1 +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.replicas' +1 +``` + +**Overseer Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.overseer.replicas' +1 +$ kubectl get petset -n demo solr-cluster-overseer -o json | jq '.spec.replicas' +1 + +``` + +**Coordinator Replicas** + +```bash +$ kubectl get solr -n demo solr-cluster -o json | jq '.spec.topology.coordinator.replicas' +1 +$ kubectl get petset -n demo solr-cluster-coordinator -o json | jq '.spec.replicas' +1 + +``` + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete sl -n demo solr-cluster +kubectl delete solropsrequest -n demo slops-hscale-up-topology slops-hscale-down-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). 
+- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/scaling/vertical-scaling/_index.md b/docs/guides/solr/scaling/vertical-scaling/_index.md new file mode 100644 index 0000000000..8efef83f99 --- /dev/null +++ b/docs/guides/solr/scaling/vertical-scaling/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Vertical Scaling +menu: + docs_{{ .version }}: + identifier: sl-scaling-vertical + name: Vertical Scaling + parent: sl-scaling + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/scaling/vertical-scaling/combined.md b/docs/guides/solr/scaling/vertical-scaling/combined.md new file mode 100644 index 0000000000..63199e695e --- /dev/null +++ b/docs/guides/solr/scaling/vertical-scaling/combined.md @@ -0,0 +1,317 @@ +--- +title: Solr Vertical Scaling Combined +menu: + docs_{{ .version }}: + identifier: sl-scaling-vertical-combined + name: Combined Cluster + parent: sl-scaling-vertical + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Solr Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a Solr Combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Combined](/docs/guides/solr/clustering/combined_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Vertical Scaling Overview](/docs/guides/solr/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Combined Cluster + +Here, we are going to deploy a `Solr` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare Solr Combined Cluster + +Now, we are going to deploy a `Solr` Combined cluster database with version `3.6.1`. + +### Deploy Solr Combined Cluster + +In this section, we are going to deploy a Solr Combined cluster. Then, in the next section we will update the resources of the database using `SolrOpsRequest` CRD. 
Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/vertical/combined/solr.yaml +solr.kubedb.com/solr-combined created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.4.1 Ready 63m +``` + +Let's check the Pod containers resources for `data`, `overseer` and `coordinator` of the solr Combined cluster. Run the following command to get the resources of the `broker` and `controller` containers of the Solr Combined cluster + +```bash +$ kubectl get pod -n demo solr-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} + +$ kubectl get pod -n demo solr-combined-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +This is the default resources of the Solr Combined cluster set by the `KubeDB` operator. + +We are now ready to apply the `SolrOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the combined cluster to meet the desired resources after scaling. + +#### Create SolrOpsRequest + +In order to update the resources of the database, we have to create a `SolrOpsRequest` CR with our desired resources. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-slops-vscale-combined-combined + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on Solr. +- `spec.VerticalScaling.node` specifies the desired resources after scaling. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/vertical/combined/scaling.yaml +solropsrequest.ops.kubedb.com/slops-slops-vscale-combined-combined created +``` + +#### Verify Solr Combined cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-slops-vscale-combined-combined VerticalScaling Successful 3m9s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
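+
+Besides the ops request status, you can check that the PetSet template already carries the new resource values; pods restarted afterwards will pick these up. The container index below is an assumption (the `solr` container is usually the first one), so adjust it if needed.
+
+```bash
+# Shows the resources that restarted pods will receive from the PetSet template.
+kubectl get petset -n demo solr-combined -o jsonpath='{.spec.template.spec.containers[0].resources}'
+```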
+ +```bash +$ kubectl describe slops -n demo slops-vscale-combined +Name: slops-vscale-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-11T12:01:22Z + Generation: 1 + Resource Version: 2357236 + UID: 943f26ba-1cca-451c-ba90-8eeb73e5e386 +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Cpu: 1 + Memory: 2.5Gi + Requests: + Cpu: 1 + Memory: 2.5Gi +Status: + Conditions: + Last Transition Time: 2024-11-11T12:01:22Z + Message: Solr ops-request has started to vertically scaling the Solr nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-11-11T12:01:25Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-11T12:03:15Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-11T12:01:30Z + Message: get pod; ConditionStatus:True; PodName:solr-combined-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-combined-0 + Last Transition Time: 2024-11-11T12:01:30Z + Message: evict pod; ConditionStatus:True; PodName:solr-combined-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-combined-0 + Last Transition Time: 2024-11-11T12:01:35Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-11T12:02:25Z + Message: get pod; ConditionStatus:True; PodName:solr-combined-1 + Observed Generation: 1 + Status: True + Type: GetPod--solr-combined-1 + Last Transition Time: 2024-11-11T12:02:25Z + Message: evict pod; ConditionStatus:True; PodName:solr-combined-1 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-combined-1 + Last Transition Time: 2024-11-11T12:03:15Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m28s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-vscale-combined + Normal Starting 2m28s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-combined + Normal Successful 2m28s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-combined for SolrOpsRequest: slops-vscale-combined + Normal UpdatePetSets 2m25s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:solr-combined-0 2m20s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-combined-0 + Warning evict pod; ConditionStatus:True; PodName:solr-combined-0 2m20s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-combined-0 + Warning running pod; ConditionStatus:False 2m15s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-combined-1 85s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-combined-1 + Warning evict pod; ConditionStatus:True; PodName:solr-combined-1 85s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-combined-1 + Normal RestartPods 35s 
KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 35s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-combined + Normal Successful 35s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-combined for SolrOpsRequest: slops-vscale-combined +``` +Now, we are going to verify from one of the Pod yaml whether the resources of the Combined cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo solr-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} +$ kubectl get pod -n demo solr-combined-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} + +``` + +The above output verifies that we have successfully scaled up the resources of the Solr Combined cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete sl -n demo solr-cluster +kubectl delete solropsrequest -n demo slops-vscale-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr Combined clustering modes [here](/docs/guides/solr/clustering/combined_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/scaling/vertical-scaling/overview.md b/docs/guides/solr/scaling/vertical-scaling/overview.md new file mode 100644 index 0000000000..a4f0a139fe --- /dev/null +++ b/docs/guides/solr/scaling/vertical-scaling/overview.md @@ -0,0 +1,54 @@ +--- +title: Solr Vertical Scaling Overview +menu: + docs_{{ .version }}: + identifier: sl-scaling-vertical-overview + name: Overview + parent: sl-scaling-vertical + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Vertical Scaling + +This guide will give an overview on how KubeDB Ops-manager operator updates the resources(for example CPU and Memory etc.) of the `Solr`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + +## How Vertical Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator updates the resources of the `Solr`. Open the image in a new tab to see the enlarged version. + +
+ Fig: Vertical scaling process of Solr
+ +The vertical scaling process consists of the following steps: + +1. At first, a user creates a `Solr` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Solr` CR. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to update the resources(for example `CPU`, `Memory` etc.) of the `Solr` cluster, the user creates a `SolrOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR. + +6. When it finds a `SolrOpsRequest` CR, it halts the `Solr` object which is referred from the `SolrOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Solr` object during the vertical scaling process. + +7. Then the `KubeDB` Ops-manager operator will update resources of the PetSet Pods to reach desired state. + +8. After the successful update of the resources of the PetSet's replica, the `KubeDB` Ops-manager operator updates the `Solr` object to reflect the updated state. + +9. After the successful update of the `Solr` resources, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on updating resources of Solr database using `SolrOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/solr/scaling/vertical-scaling/topology.md b/docs/guides/solr/scaling/vertical-scaling/topology.md new file mode 100644 index 0000000000..7aa1ed44dd --- /dev/null +++ b/docs/guides/solr/scaling/vertical-scaling/topology.md @@ -0,0 +1,401 @@ +--- +title: Solr Vertical Scaling Topology +menu: + docs_{{ .version }}: + identifier: sl-scaling-vertical-topology + name: Topology Cluster + parent: sl-scaling-vertical + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Solr Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a Solr topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Topology](/docs/guides/solr/clustering/topology_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Vertical Scaling Overview](/docs/guides/solr/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Topology Cluster + +Here, we are going to deploy a `Solr` topology cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. 
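Before preparing the cluster, it can be helpful to confirm which Solr versions your installed KubeDB catalog supports. A quick check (assuming the default KubeDB catalog is installed; the exact list depends on your catalog release) looks like this:

```bash
# List the SolrVersion objects shipped with the installed KubeDB catalog.
$ kubectl get solrversions
```

Pick a supported version from that list for the `spec.version` field of the Solr CR used in the next step.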
+ +### Prepare Solr Topology Cluster + +Now, we are going to deploy a `Solr` topology cluster database with version `3.6.1`. + +### Deploy Solr Topology Cluster + +In this section, we are going to deploy a Solr topology cluster. Then, in the next section we will update the resources of the database using `SolrOpsRequest` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/vertical/topology/solr.yaml +solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 63m +``` + +Let's check the Pod containers resources for `data`, `overseer` and `coordinator` of the solr topology cluster. Run the following command to get the resources of the `broker` and `controller` containers of the Solr topology cluster + +```bash +$ kubectl get pod -n demo solr-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +```bash +$ kubectl get pod -n demo solr-cluster-overseer-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` + +```bash +$ kubectl get pod -n demo solr-cluster-coordinator-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "900m", + "memory": "2Gi" + } +} +``` +This is the default resources of the Solr topology cluster set by the `KubeDB` operator. + +We are now ready to apply the `SolrOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the topology cluster to meet the desired resources after scaling. + +#### Create SolrOpsRequest + +In order to update the resources of the database, we have to create a `SolrOpsRequest` CR with our desired resources. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: slops-vscale-topology + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: VerticalScaling + verticalScaling: + data: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + overseer: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi + coordinator: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `solr-cluster` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on Solr. 
+- `spec.VerticalScaling.data`, `spec.VerticalScaling.overseer` and `spec.VerticalScaling.coordinator` specifies the desired resources for topologies after scaling. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/scaling/vertical/topology/scaling.yaml +solropsrequest.ops.kubedb.com/slops-slops-vscale-topology-topology created +``` + +#### Verify Solr Topology cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +slops-vscale-topology VerticalScaling Successful 3m9s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe slops -n demo slops-vscale-topology +Name: slops-vscale-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-11T11:24:23Z + Generation: 1 + Resource Version: 2353035 + UID: f30a3bd6-9903-4747-96af-e4f50948afbc +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Type: VerticalScaling + Vertical Scaling: + Coordinator: + Resources: + Limits: + Cpu: 1 + Memory: 2.5Gi + Requests: + Cpu: 1 + Memory: 2.5Gi + Data: + Resources: + Limits: + Cpu: 1 + Memory: 2.5Gi + Requests: + Cpu: 1 + Memory: 2.5Gi + Overseer: + Resources: + Limits: + Cpu: 1 + Memory: 2.5Gi + Requests: + Cpu: 1 + Memory: 2.5Gi +Status: + Conditions: + Last Transition Time: 2024-11-11T11:24:23Z + Message: Solr ops-request has started to vertically scaling the Solr nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-11-11T11:24:23Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-11T11:26:58Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-11T11:24:28Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-11T11:24:28Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-overseer-0 + Last Transition Time: 2024-11-11T11:24:33Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-11-11T11:25:18Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: GetPod--solr-cluster-data-0 + Last Transition Time: 2024-11-11T11:25:18Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-data-0 + Last Transition Time: 2024-11-11T11:26:08Z + Message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: 
GetPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-11T11:26:08Z + Message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Observed Generation: 1 + Status: True + Type: EvictPod--solr-cluster-coordinator-0 + Last Transition Time: 2024-11-11T11:26:58Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m55s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/slops-vscale-topology + Normal UpdatePetSets 3m55s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m50s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 3m50s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + Warning running pod; ConditionStatus:False 3m45s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:solr-cluster-data-0 3m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 3m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + Warning get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 2m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Warning evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 2m10s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + Normal RestartPods 80s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 80s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 80s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: slops-vscale-topology +``` +Now, we are going to verify from one of the Pod yaml whether the resources of the topology cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo solr-cluster-coordinator-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} +$ kubectl get pod -n demo solr-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} +$ kubectl get pod -n demo solr-cluster-overseer-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1", + "memory": "2560Mi" + }, + "requests": { + "cpu": "1", + "memory": "2560Mi" + } +} + +``` + +The above output verifies that we have successfully scaled up the resources of the Solr topology cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete sl -n demo solr-cluster +kubectl delete solropsrequest -n demo slops-vscale-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). 
+- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/tls/_index.md b/docs/guides/solr/tls/_index.md new file mode 100644 index 0000000000..3db4ae21ee --- /dev/null +++ b/docs/guides/solr/tls/_index.md @@ -0,0 +1,10 @@ +--- +title: Run Solr With TLS +menu: + docs_{{ .version }}: + identifier: sl-tls-solr + name: TLS/SSL Encryption + parent: sl-solr-guides + weight: 26 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/solr/tls/combined.md b/docs/guides/solr/tls/combined.md new file mode 100644 index 0000000000..afb6dd3782 --- /dev/null +++ b/docs/guides/solr/tls/combined.md @@ -0,0 +1,219 @@ +--- +title: Solr TLS/SSL Encryption Overview +menu: + docs_{{ .version }}: + identifier: sl-tls-combined + name: Overview + parent: sl-tls-solr + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Solr with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption for Solr. This tutorial will show you how to use KubeDB to run a Solr combined with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes combined, and the kubectl command-line tool must be configured to communicate with your combined. If you do not already have a combined, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your combined to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your combined following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Solr) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in Solr. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [Solr concept](/docs/guides/solr/concepts/solr.md), + +`tls` is applicable for all types of Solr (i.e., `combined` and `combined`). + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `combinedIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ combinedIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in Solr. 
Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=Solr/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls solr-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: solr-ca-issuer + namespace: demo +spec: + ca: + secretName: solr-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/tls/sl-issuer.yaml +issuer.cert-manager.io/solr-ca-issuer created +``` + +## TLS/SSL encryption in Solr combined combined + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + name: solr-ca-issuer + kind: Issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + +``` + +### Deploy Solr combined combined with TLS/SSL + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/tls/solr-combined.yaml +solr.kubedb.com/solr-combined created +``` + +Now, wait until `solr-combined created` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.4.1 Ready 2m31s +``` + +### Verify TLS/SSL in Solr combined combined + +```bash +$ kubectl describe secret solr-combined-client-cert -n demo +Name: solr-combined-client-cert +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-combined + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.solr-combined-pods.demo,*.solr-combined-pods.demo.svc.combined.local,localhost,solr-combined,solr-combined-pods,solr-combined-pods.demo.svc,so... + cert-manager.io/certificate-name: solr-combined-client-cert + cert-manager.io/common-name: solr-combined + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: combinedIssuer + cert-manager.io/issuer-name: self-signed-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +truststore.p12: 1090 bytes +ca.crt: 1147 bytes +keystore.p12: 3511 bytes +tls.crt: 1497 bytes +tls.key: 1679 bytes +``` + +Now, Let's exec into a solr data pod and verify the configuration that the TLS is enabled. 
+ +```bash +$ kubectl exec -it -n demo solr-combined-data-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-combined-data-0:/opt/solr-9.4.1$ env | grep -i SSL +JAVA_OPTS= -Djavax.net.ssl.trustStore=/var/solr/etc/truststore.p12 -Djavax.net.ssl.trustStorePassword=QyHKB(dYoT1MQYMu -Djavax.net.ssl.keyStore=/var/solr/etc/keystore.p12 -Djavax.net.ssl.keyStorePassword=QyHKB(dYoT1MQYMu -Djavax.net.ssl.keyStoreType=PKCS12 -Djavax.net.ssl.trustStoreType=PKCS12 +SOLR_SSL_TRUST_STORE_PASSWORD=QyHKB(dYoT1MQYMu +SOLR_SSL_ENABLED=true +SOLR_SSL_WANT_CLIENT_AUTH=false +SOLR_SSL_KEY_STORE_PASSWORD=QyHKB(dYoT1MQYMu +SOLR_SSL_TRUST_STORE=/var/solr/etc/truststore.p12 +SOLR_SSL_KEY_STORE=/var/solr/etc/keystore.p12 +SOLR_SSL_NEED_CLIENT_AUTH=false + +``` + +We can see from the above output that, keystore location is `/var/solr/etc` which means that TLS is enabled. + +```bash +solr@solr-combined-data-0:/var/solr/etc$ ls +ca.crt keystore.p12 tls.crt tls.key truststore.p12 + +``` + +From the above output, we can see that we are able to connect to the Solr combined using the TLS configuration. + +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solr -n demo solr-combined +kubectl delete issuer -n demo solr-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Monitor your Solr combined with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). +- Monitor your Solr combined with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md). +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/tls/overview.md b/docs/guides/solr/tls/overview.md new file mode 100644 index 0000000000..36f135bd57 --- /dev/null +++ b/docs/guides/solr/tls/overview.md @@ -0,0 +1,70 @@ +--- +title: Solr TLS/SSL Encryption Overview +menu: + docs_{{ .version }}: + identifier: sl-tls-overview + name: Overview + parent: sl-tls-solr + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr TLS/SSL Encryption + +**Prerequisite :** To configure TLS/SSL in `Solr`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster following steps [here](https://cert-manager.io/docs/installation/kubernetes/). + +To issue a certificate, the following crd of `cert-manager` is used: + +- `Issuer/ClusterIssuer`: Issuers, and ClusterIssuers represent certificate authorities (CAs) that are able to generate signed certificates by honoring certificate signing requests. All cert-manager certificates require a referenced issuer that is in a ready condition to attempt to honor the request. You can learn more details [here](https://cert-manager.io/docs/concepts/issuer/). + +- `Certificate`: `cert-manager` has the concept of Certificates that define a desired x509 certificate which will be renewed and kept up to date. You can learn more details [here](https://cert-manager.io/docs/concepts/certificate/). + +**Solr CRD Specification :** + +KubeDB uses following crd fields to enable SSL/TLS encryption in `Solr`. 
- `spec:`
  - `enableSSL`
  - `tls:`
    - `issuerRef`
    - `certificates`

Read about these fields in detail in the [Solr concept](/docs/guides/solr/concepts/solr.md) guide.

When `enableSSL` is set to `true`, the user must specify the `tls.issuerRef` field. `KubeDB` uses the `Issuer` or `ClusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificates`, to generate certificate secrets. These certificate secrets, including `ca.crt`, `tls.crt`, and `tls.key`, are used to configure the `Solr` server and clients.

## How TLS/SSL Is Configured in Solr

The following figure shows how the `KubeDB` Ops-manager operator configures TLS/SSL in Solr. Open the image in a new tab to see the enlarged version.

Fig: Deploy Solr with TLS/SSL
+ +Deploying Solr with TLS/SSL configuration process consists of the following steps: + +1. At first, a user creates a `Issuer/ClusterIssuer` cr. + +2. Then the user creates a `Solr` CR which refers to the `Issuer/ClusterIssuer` CR that the user created in the previous step. + +3. `KubeDB` Provisioner operator watches for the `Solr` cr. + +4. When it finds one, it creates `Secret`, `Service`, etc. for the `Solr` cluster. + +5. `KubeDB` Ops-manager operator watches for `Solr`(5c), `Issuer/ClusterIssuer`(5b), `Secret` and `Service`(5a). + +6. When it finds all the resources(`Solr`, `Issuer/ClusterIssuer`, `Secret`, `Service`), it creates `Certificates` by using `tls.issuerRef` and `tls.certificates` field specification from `Solr` cr. + +7. `cert-manager` watches for certificates. + +8. When it finds one, it creates certificate secrets `tls-secrets`(server, client, exporter secrets etc.) that holds the actual certificate signed by the CA. + +9. `KubeDB` Provisioner operator watches for the Certificate secrets `tls-secrets`. + +10. When it finds all the tls-secret, it creates the related `PetSets` so that Solr database can be configured with TLS/SSL. + +In the next doc, we are going to show a step-by-step guide on how to configure a `Solr` cluster with TLS/SSL. \ No newline at end of file diff --git a/docs/guides/solr/tls/topology.md b/docs/guides/solr/tls/topology.md new file mode 100644 index 0000000000..c3e3f0e3e0 --- /dev/null +++ b/docs/guides/solr/tls/topology.md @@ -0,0 +1,237 @@ +--- +title: Solr TLS/SSL Encryption Overview +menu: + docs_{{ .version }}: + identifier: sl-tls-topology + name: Overview + parent: sl-tls-solr + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Run Solr with TLS/SSL (Transport Encryption) + +KubeDB supports providing TLS/SSL encryption for Solr. This tutorial will show you how to use KubeDB to run a Solr cluster with TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Solr) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +KubeDB uses following crd fields to enable SSL/TLS encryption in Solr. + +- `spec:` + - `enableSSL` + - `tls:` + - `issuerRef` + - `certificate` + +Read about the fields in details in [Solr concept](/docs/guides/solr/concepts/solr.md), + +`tls` is applicable for all types of Solr (i.e., `combined` and `topology`). + +Users must specify the `tls.issuerRef` field. KubeDB uses the `issuer` or `clusterIssuer` referenced in the `tls.issuerRef` field, and the certificate specs provided in `tls.certificate` to generate certificate secrets. 
These certificate secrets are then used to generate required certificates including `ca.crt`, `tls.crt`, `tls.key`, `keystore.jks` and `truststore.jks`. + +## Create Issuer/ ClusterIssuer + +We are going to create an example `Issuer` that will be used throughout the duration of this tutorial to enable SSL/TLS in Solr. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating you ca certificates using openssl. + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=Solr/O=kubedb" +``` + +- Now create a ca-secret using the certificate files you have just generated. + +```bash +kubectl create secret tls solr-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +``` + +Now, create an `Issuer` using the `ca-secret` you have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: solr-ca-issuer + namespace: demo +spec: + ca: + secretName: solr-ca +``` + +Apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/tls/sl-issuer.yaml +issuer.cert-manager.io/solr-ca-issuer created +``` + +## TLS/SSL encryption in Solr Topology Cluster + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + enableSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + name: solr-ca-issuer + kind: ClusterIssuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +### Deploy Solr Topology Cluster with TLS/SSL + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/tls/solr-topology.yaml +Solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster created` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 2m31s +``` + +### Verify TLS/SSL in Solr Topology Cluster + +```bash +$ kubectl describe secret solr-cluster-client-cert -n demo +Name: solr-cluster-client-cert +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=solr-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=solrs.kubedb.com + controller.cert-manager.io/fao=true +Annotations: cert-manager.io/alt-names: + *.solr-cluster-pods.demo,*.solr-cluster-pods.demo.svc.cluster.local,localhost,solr-cluster,solr-cluster-pods,solr-cluster-pods.demo.svc,so... 
+ cert-manager.io/certificate-name: solr-cluster-client-cert + cert-manager.io/common-name: solr-cluster + cert-manager.io/ip-sans: 127.0.0.1 + cert-manager.io/issuer-group: cert-manager.io + cert-manager.io/issuer-kind: ClusterIssuer + cert-manager.io/issuer-name: self-signed-issuer + cert-manager.io/uri-sans: + +Type: kubernetes.io/tls + +Data +==== +truststore.p12: 1090 bytes +ca.crt: 1147 bytes +keystore.p12: 3511 bytes +tls.crt: 1497 bytes +tls.key: 1679 bytes +``` + +Now, Let's exec into a solr data pod and verify the configuration that the TLS is enabled. + +```bash +$ kubectl exec -it -n demo solr-cluster-data-0 -- bash +Defaulted container "solr" out of: solr, init-solr (init) +solr@solr-cluster-data-0:/opt/solr-9.4.1$ env | grep -i SSL +JAVA_OPTS= -Djavax.net.ssl.trustStore=/var/solr/etc/truststore.p12 -Djavax.net.ssl.trustStorePassword=QyHKB(dYoT1MQYMu -Djavax.net.ssl.keyStore=/var/solr/etc/keystore.p12 -Djavax.net.ssl.keyStorePassword=QyHKB(dYoT1MQYMu -Djavax.net.ssl.keyStoreType=PKCS12 -Djavax.net.ssl.trustStoreType=PKCS12 +SOLR_SSL_TRUST_STORE_PASSWORD=QyHKB(dYoT1MQYMu +SOLR_SSL_ENABLED=true +SOLR_SSL_WANT_CLIENT_AUTH=false +SOLR_SSL_KEY_STORE_PASSWORD=QyHKB(dYoT1MQYMu +SOLR_SSL_TRUST_STORE=/var/solr/etc/truststore.p12 +SOLR_SSL_KEY_STORE=/var/solr/etc/keystore.p12 +SOLR_SSL_NEED_CLIENT_AUTH=false + +``` + +We can see from the above output that, keystore location is `/var/solr/etc` which means that TLS is enabled. + +```bash +solr@solr-cluster-data-0:/var/solr/etc$ ls +ca.crt keystore.p12 tls.crt tls.key truststore.p12 + +``` + +From the above output, we can see that we are able to connect to the Solr cluster using the TLS configuration. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solr -n demo solr-cluster +kubectl delete issuer -n demo solr-ca-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Monitor your Solr cluster with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). +- Monitor your Solr cluster with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md). +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/update-version/_index.md b/docs/guides/solr/update-version/_index.md new file mode 100644 index 0000000000..731318839d --- /dev/null +++ b/docs/guides/solr/update-version/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Update Version +menu: + docs_{{ .version }}: + identifier: sl-update-version + name: Update Version + parent: sl-solr-guides + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/update-version/overview.md b/docs/guides/solr/update-version/overview.md new file mode 100644 index 0000000000..3ae5aa7919 --- /dev/null +++ b/docs/guides/solr/update-version/overview.md @@ -0,0 +1,54 @@ +--- +title: Update Version Overview +menu: + docs_{{ .version }}: + identifier: sl-update-version-overview + name: Overview + parent: sl-update-version + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Update Version Overview + +This guide will give you an overview on how KubeDB Ops-manager operator update the version of `Solr`. 
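A version update is requested declaratively: the user creates a `SolrOpsRequest` of type `UpdateVersion` that points to an existing Solr object and names the target version. A minimal sketch is shown below (the complete, working example appears in the next doc); the object name and target version here are only illustrative:

```yaml
apiVersion: ops.kubedb.com/v1alpha1
kind: SolrOpsRequest
metadata:
  name: solr-update-version
  namespace: demo
spec:
  databaseRef:
    name: solr-cluster      # the Solr object whose version will be updated
  type: UpdateVersion
  updateVersion:
    targetVersion: 9.6.1    # must be a version supported by the installed KubeDB catalog
```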
## Before You Begin

- You should be familiar with the following `KubeDB` concepts:
  - [Solr](/docs/guides/solr/concepts/solr.md)
  - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md)

## How the Update Version Process Works

The following diagram shows how the KubeDB Ops-manager operator updates the version of `Solr`. Open the image in a new tab to see the enlarged version.

Fig: Updating process of Solr
+ +The updating process consists of the following steps: + +1. At first, a user creates a `Solr` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Solr` CR. + +3. When the operator finds a `Solr` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to update the version of the `Solr` database the user creates a `SolrOpsRequest` CR with the desired version. + +5. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR. + +6. When it finds a `SolrOpsRequest` CR, it halts the `Solr` object which is referred from the `SolrOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Solr` object during the updating process. + +7. By looking at the target version from `SolrOpsRequest` CR, `KubeDB` Ops-manager operator updates the images of all the `PetSets`. + +8. After successfully updating the `PetSets` and their `Pods` images, the `KubeDB` Ops-manager operator updates the image of the `Solr` object to reflect the updated state of the database. + +9. After successfully updating of `Solr` object, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator can resume its usual operations. + +In the next doc, we are going to show a step by step guide on updating of a Solr database using updateVersion operation. \ No newline at end of file diff --git a/docs/guides/solr/update-version/update-version.md b/docs/guides/solr/update-version/update-version.md new file mode 100644 index 0000000000..0ff8f482da --- /dev/null +++ b/docs/guides/solr/update-version/update-version.md @@ -0,0 +1,273 @@ +--- +title: Update Version of Solr +menu: + docs_{{ .version }}: + identifier: sl-update-version-solr + name: Update Version OpsRequest + parent: sl-update-version + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Update version of Solr + +This guide will show you how to use `KubeDB` Ops-manager operator to update the version of `Solr` Combined or Topology. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Updating Overview](/docs/guides/solr/update-version/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/Solr](/docs/examples/solr) directory of [kubedb/docs](https://github.com/kube/docs) repository. + +## Prepare Solr + +Now, we are going to deploy a `Solr` replicaset database with version `3.6.8`. + +### Deploy Solr + +In this section, we are going to deploy a Solr topology cluster. Then, in the next section we will update the version using `SolrOpsRequest` CRD. 
Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/update-version/solr.yaml +solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster` created has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +Solr-prod kubedb.com/v1 3.5.2 Provisioning 0s +Solr-prod kubedb.com/v1 3.5.2 Provisioning 55s +. +. +Solr-prod kubedb.com/v1 3.5.2 Ready 119s +``` + +We are now ready to apply the `SolrOpsRequest` CR to update. + +### update Solr Version + +Here, we are going to update `Solr` from `9.4.1` to `9.6.1`. + +#### Create SolrOpsRequest: + +In order to update the version, we have to create a `SolrOpsRequest` CR with your desired version that is supported by `KubeDB`. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: solr-update-version + namespace: demo +spec: + databaseRef: + name: solr-cluster + type: UpdateVersion + updateVersion: + targetVersion: 9.6.1 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing operation on `solr-cluster` Solr. +- `spec.type` specifies that we are going to perform `UpdateVersion` on our database. +- `spec.updateVersion.targetVersion` specifies the expected version of the database `9.6.1`. + +> **Note:** If you want to update combined Solr, you just refer to the `Solr` combined object name in `spec.databaseRef.name`. To create a combined Solr, you can refer to the [Solr Combined](/docs/guides/solr/clustering/combined_cluster.md) guide. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Solr/update-version/update-version-ops.yaml +solropsrequest.ops.kubedb.com/solr-update-version created +``` + +#### Verify Solr version updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the image of `Solr` object and related `PetSets` and `Pods`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get Solropsrequest -n demo +NAME TYPE STATUS AGE +solr-update-version UpdateVersion Successful 2m6s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to update the database version. 
+ +```bash +$ kubectl get slops -n demo solr-update-version -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"SolrOpsRequest","metadata":{"annotations":{},"name":"solr-update-version","namespace":"demo"},"spec":{"databaseRef":{"name":"solr-cluster"},"type":"UpdateVersion","updateVersion":{"targetVersion":"9.6.1"}}} + creationTimestamp: "2024-11-06T07:10:42Z" + generation: 1 + name: solr-update-version + namespace: demo + resourceVersion: "1753051" + uid: caf69d71-1894-4da1-931f-a8f7ff8088a7 +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: UpdateVersion + updateVersion: + targetVersion: 9.6.1 +status: + conditions: + - lastTransitionTime: "2024-11-06T07:10:42Z" + message: Solr ops-request has started to update version + observedGeneration: 1 + reason: UpdateVersion + status: "True" + type: UpdateVersion + - lastTransitionTime: "2024-11-06T07:10:50Z" + message: successfully reconciled the Solr with updated version + observedGeneration: 1 + reason: UpdatePetSets + status: "True" + type: UpdatePetSets + - lastTransitionTime: "2024-11-06T07:13:20Z" + message: Successfully Restarted Solr nodes + observedGeneration: 1 + reason: RestartPods + status: "True" + type: RestartPods + - lastTransitionTime: "2024-11-06T07:10:55Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-overseer-0 + - lastTransitionTime: "2024-11-06T07:10:55Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-overseer-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-overseer-0 + - lastTransitionTime: "2024-11-06T07:11:00Z" + message: running pod; ConditionStatus:False + observedGeneration: 1 + status: "False" + type: RunningPod + - lastTransitionTime: "2024-11-06T07:11:40Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-data-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-data-0 + - lastTransitionTime: "2024-11-06T07:11:40Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-data-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-data-0 + - lastTransitionTime: "2024-11-06T07:12:25Z" + message: get pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + observedGeneration: 1 + status: "True" + type: GetPod--solr-cluster-coordinator-0 + - lastTransitionTime: "2024-11-06T07:12:25Z" + message: evict pod; ConditionStatus:True; PodName:solr-cluster-coordinator-0 + observedGeneration: 1 + status: "True" + type: EvictPod--solr-cluster-coordinator-0 + - lastTransitionTime: "2024-11-06T07:13:20Z" + message: Successfully updated SOlr version + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful + 61s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/Solr-prod for SolrOpsRequest: Solr-update-version +``` + +Now, we are going to verify whether the `Solr` and the related `PetSets` and their `Pods` have the new version image. 
Let's check, + +```bash +$ kubectl get sl -n demo solr-cluster -o=jsonpath='{.spec.version}{"\n"}' +9.6.1 +~/y/s/ops (main|✚23…) $ kubectl get petset -n demo Solr-cluster-data -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}' +ghcr.io/appscode-images/daily/solr:9.6.1_20241024@sha256:0996340eff1e59bcac49eb8f96c28f0a3efb061f0e91b2053bfb7dade860c0e4 + +``` + +You can see from above, our `Solr` has been updated with the new version. So, the updateVersion process is successfully completed. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete solropsrequest -n demo solr-update-version +kubectl delete solr -n demo solr-cluster +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md). +- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md). +- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md). + +- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/solr/volume-expansion/_index.md b/docs/guides/solr/volume-expansion/_index.md new file mode 100644 index 0000000000..3786b97f83 --- /dev/null +++ b/docs/guides/solr/volume-expansion/_index.md @@ -0,0 +1,11 @@ +--- +title: Solr Volume Expansion +menu: + docs_{{ .version }}: + identifier: sl-volume-expansion + name: Volume Expansion + parent: sl-solr-guides + weight: 24 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- \ No newline at end of file diff --git a/docs/guides/solr/volume-expansion/combined.md b/docs/guides/solr/volume-expansion/combined.md new file mode 100644 index 0000000000..e86c6af90b --- /dev/null +++ b/docs/guides/solr/volume-expansion/combined.md @@ -0,0 +1,361 @@ +--- +title: Solr Combined Volume Expansion +menu: + docs_{{ .version }}: + identifier: sl-volume-expansion-combined + name: Combined Cluster + parent: sl-volume-expansion + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Solr Combined Volume Expansion + +This guide will show you how to use `KubeDB` Ops-manager operator to expand the volume of a Solr Combined Cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- You must have a `StorageClass` that supports volume expansion. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Solr](/docs/guides/solr/concepts/solr.md) + - [Combined](/docs/guides/solr/clustering/combined_cluster.md) + - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md) + - [Volume Expansion Overview](/docs/guides/solr/volume-expansion/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/Solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Solr) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Expand Volume of Combined Solr Cluster + +Here, we are going to deploy a `Solr` combined using a supported version by `KubeDB` operator. Then we are going to apply `SolrOpsRequest` to expand its volume. + +### Prepare Solr Combined CLuster + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 24d +``` + +We can see from the output the `local-path` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. + +Now, we are going to deploy a `Solr` combined cluster with version `9.4.1`. + +### Deploy Solr + +In this section, we are going to deploy a Solr combined cluster with 1GB volume. Then, in the next section we will expand its volume to 2GB using `SolrOpsRequest` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-combined + namespace: demo +spec: + version: 9.4.1 + replicas: 2 + zookeeperRef: + name: zoo + namespace: demo + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/volume-expansion/combined.yaml +Solr.kubedb.com/Solr-dev created +``` + +Now, wait until `Solr-dev` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-combined kubedb.com/v1alpha2 9.4.1 Ready 23m +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get petset -n demo solr-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-02cddba5-1d6a-4f1b-91b2-a7e55857b6b7 1Gi RWO Delete Bound demo/solr-combined-data-solr-combined-1 longhorn 23m +pvc-61b8f97a-a588-4125-99f3-604f6a70d560 1Gi RWO Delete Bound demo/solr-combined-data-solr-combined-0 longhorn 24m +``` + +You can see the petset has 1GB storage, and the capacity of all the persistent volumes are also 1GB. + +We are now ready to apply the `SolrOpsRequest` CR to expand the volume of this database. + +### Volume Expansion + +Here, we are going to expand the volume of the Solr combined cluster. + +#### Create SolrOpsRequest + +In order to expand the volume of the database, we have to create a `SolrOpsRequest` CR with our desired volume size. Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-volume-exp-combined + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + node: 11Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `Solr-dev`. 
+- `spec.type` specifies that we are performing `VolumeExpansion` on our database. +- `spec.volumeExpansion.node` specifies the desired volume size. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/volume-expansion/solr-volume-expansion-combined.yaml +solropsrequest.ops.kubedb.com/sl-volume-exp-combined created +``` + +#### Verify Solr Combined volume expanded successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `Solr` object and related `PetSets` and `Persistent Volumes`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get slops -n demo +NAME TYPE STATUS AGE +sl-volume-exp-topology VolumeExpansion Successful 3m +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. + +```bash +$ kubectl describe slops -n demo sl-volume-exp-topology +Name: sl-volume-exp-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-12T07:59:08Z + Generation: 1 + Resource Version: 2453072 + UID: efa404a1-0cdf-46a9-9995-3f3fca88fa4a +Spec: + Apply: IfReady + Database Ref: + Name: solr-combined + Type: VolumeExpansion + Volume Expansion: + Mode: Offline + Node: 11Gi +Status: + Conditions: + Last Transition Time: 2024-11-12T07:59:08Z + Message: Solr ops-request has started to expand volume of solr nodes. + Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-11-12T07:59:26Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-11-12T07:59:16Z + Message: get petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetset + Last Transition Time: 2024-11-12T07:59:16Z + Message: delete petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePetset + Last Transition Time: 2024-11-12T08:01:31Z + Message: successfully updated combined node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionCombinedNode + Status: True + Type: VolumeExpansionCombinedNode + Last Transition Time: 2024-11-12T08:01:06Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-11-12T07:59:31Z + Message: patch ops request; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsRequest + Last Transition Time: 2024-11-12T07:59:31Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2024-11-12T08:00:06Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-11-12T08:00:06Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2024-11-12T08:01:21Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-11-12T08:00:21Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: 
CreatePod + Last Transition Time: 2024-11-12T08:01:36Z + Message: successfully reconciled the Solr resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-12T08:01:41Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-11-12T08:01:41Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-11-12T08:01:41Z + Message: Successfully completed volumeExpansion for Solr + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m29s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/sl-volume-exp-topology + Normal Starting 3m29s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-combined + Normal Successful 3m29s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-combined for SolrOpsRequest: sl-volume-exp-topology + Warning get petset; ConditionStatus:True 3m21s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning delete petset; ConditionStatus:True 3m21s KubeDB Ops-manager Operator delete petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 3m16s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Normal OrphanPetSetPods 3m11s KubeDB Ops-manager Operator successfully deleted the petSets with orphan propagation policy + Warning get pod; ConditionStatus:True 3m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 3m6s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 3m6s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 3m1s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m31s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m31s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 2m31s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 2m31s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m26s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m26s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m21s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 2m16s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 2m16s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 2m16s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m11s KubeDB Ops-manager Operator 
get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 2m6s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 2m6s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 2m1s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 91s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 91s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 91s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 91s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 86s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 86s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 81s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 81s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 76s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 76s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 76s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 76s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 76s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 71s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal VolumeExpansionCombinedNode 66s KubeDB Ops-manager Operator successfully updated combined node PVC sizes + Normal UpdatePetSets 61s KubeDB Ops-manager Operator successfully reconciled the Solr resources + Warning get pet set; ConditionStatus:True 56s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 56s KubeDB Ops-manager Operator PetSet is recreated + Normal Starting 56s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-combined + Normal Successful 56s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-combined for SolrOpsRequest: sl-volume-exp-topology +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo solr-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"11Gi" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-02cddba5-1d6a-4f1b-91b2-a7e55857b6b7 11Gi RWO Delete Bound demo/solr-combined-data-solr-combined-1 longhorn 33m +pvc-61b8f97a-a588-4125-99f3-604f6a70d560 11Gi RWO Delete Bound demo/solr-combined-data-solr-combined-0 longhorn 33m +``` + +The above output verifies that we have successfully expanded the volume of the Solr. 
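Optionally, you can also confirm that the new size has been written back to the `Solr` object itself and that every PVC of the combined cluster is `Bound` at the expanded capacity. The commands below are a minimal sketch; the `jsonpath` expression assumes the combined cluster keeps its storage request under `.spec.storage`, so adjust it if your CR layout differs:

```bash
# Hypothetical verification commands; adjust object names and paths to your cluster.
# The Solr object should now carry the expanded storage request:
kubectl get solr -n demo solr-combined -o jsonpath='{.spec.storage.resources.requests.storage}{"\n"}'

# Every PVC in the demo namespace should be Bound at the new capacity:
kubectl get pvc -n demo -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,CAPACITY:.status.capacity.storage
```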
## Cleaning Up

To clean up the Kubernetes resources created by this tutorial, run:

```bash
kubectl delete solropsrequest -n demo sl-volume-exp-combined
kubectl delete sl -n demo solr-combined
kubectl delete ns demo
```

## Next Steps

- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md).
- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md).
- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md).
- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md).
- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).

diff --git a/docs/guides/solr/volume-expansion/overview.md b/docs/guides/solr/volume-expansion/overview.md
new file mode 100644
index 0000000000..f4b8f1f078
--- /dev/null
+++ b/docs/guides/solr/volume-expansion/overview.md
@@ -0,0 +1,56 @@
---
title: Solr Volume Expansion Overview
menu:
  docs_{{ .version }}:
    identifier: sl-volume-expansion-overview
    name: Overview
    parent: sl-volume-expansion
    weight: 10
menu_name: docs_{{ .version }}
section_menu_id: guides
---

> New to KubeDB? Please start [here](/docs/README.md).

# Solr Volume Expansion

This guide gives an overview of how the `KubeDB` Ops-manager operator expands the volume of the different `Solr` cluster modes (Combined and Topology).

## Before You Begin

- You should be familiar with the following `KubeDB` concepts:
  - [Solr](/docs/guides/solr/concepts/solr.md)
  - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md)

## How Volume Expansion Process Works

The following diagram shows how the `KubeDB` Ops-manager operator expands the volumes of the `Solr` database components. Open the image in a new tab to see the enlarged version.

<figure align="center">
  <img alt="Volume Expansion process of Solr" src="/docs/images/day-2-operation/solr/volume-expansion.svg">
<figcaption align="center">Fig: Volume Expansion process of Solr</figcaption>
</figure>
The Volume Expansion process consists of the following steps:

1. At first, a user creates a `Solr` Custom Resource (CR).

2. `KubeDB` Provisioner operator watches the `Solr` CR.

3. When the operator finds a `Solr` CR, it creates the required number of `PetSets` along with related resources such as Secrets and Services.

4. Each PetSet creates a Persistent Volume according to the Volume Claim Template provided in the PetSet configuration. This Persistent Volume will be expanded by the `KubeDB` Ops-manager operator.

5. Then, in order to expand the volume of the various components of the `Solr` cluster (i.e. Combined, Data, Overseer, Coordinator), the user creates a `SolrOpsRequest` CR with the desired information.

6. `KubeDB` Ops-manager operator watches the `SolrOpsRequest` CR.

7. When it finds a `SolrOpsRequest` CR, it pauses the `Solr` object referenced by the `SolrOpsRequest`, so the `KubeDB` Provisioner operator does not perform any operations on the `Solr` object during the volume expansion process.

8. Then the `KubeDB` Ops-manager operator expands the persistent volumes to reach the size defined in the `SolrOpsRequest` CR.

9. After the successful volume expansion of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the new volume size in the `Solr` object to reflect the updated state.

10. After the successful Volume Expansion of the `Solr` components, the `KubeDB` Ops-manager operator resumes the `Solr` object so that the `KubeDB` Provisioner operator can resume its usual operations.

In the next docs, we are going to show a step-by-step guide on Volume Expansion of various Solr database components using `SolrOpsRequest` CRD.

diff --git a/docs/guides/solr/volume-expansion/topology.md b/docs/guides/solr/volume-expansion/topology.md
new file mode 100644
index 0000000000..3d7ec0c534
--- /dev/null
+++ b/docs/guides/solr/volume-expansion/topology.md
@@ -0,0 +1,419 @@
---
title: Solr Topology Volume Expansion
menu:
  docs_{{ .version }}:
    identifier: sl-volume-expansion-topology
    name: Topology Cluster
    parent: sl-volume-expansion
    weight: 30
menu_name: docs_{{ .version }}
section_menu_id: guides
---

> New to KubeDB? Please start [here](/docs/README.md).

# Solr Topology Volume Expansion

This guide will show you how to use the `KubeDB` Ops-manager operator to expand the volume of a Solr Topology Cluster.

## Before You Begin

- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.

- You must have a `StorageClass` that supports volume expansion.

- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).

- You should be familiar with the following `KubeDB` concepts:
  - [Solr](/docs/guides/solr/concepts/solr.md)
  - [Topology](/docs/guides/solr/clustering/topology_cluster.md)
  - [SolrOpsRequest](/docs/guides/solr/concepts/solropsrequests.md)
  - [Volume Expansion Overview](/docs/guides/solr/volume-expansion/overview.md)

To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.

```bash
$ kubectl create ns demo
namespace/demo created
```

> Note: The yaml files used in this tutorial are stored in the [docs/examples/solr](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/solr) folder of the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
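The `Solr` objects in this guide refer to a ZooKeeper cluster named `zoo` in the `demo` namespace through `zookeeperRef`. If you do not have one yet, a minimal sketch of a KubeDB-managed ZooKeeper object is shown below; treat the field values (especially `version`) as assumptions and check the versions supported by your KubeDB installation (for example with `kubectl get zookeeperversions`) before applying it:

```yaml
# Hypothetical ZooKeeper object backing the `zookeeperRef` used in this guide.
# Field names follow the KubeDB v1alpha2 API; the version and replica count are
# assumptions, so verify them against your KubeDB release before applying.
apiVersion: kubedb.com/v1alpha2
kind: ZooKeeper
metadata:
  name: zoo
  namespace: demo
spec:
  version: "3.9.1"
  replicas: 3
  storage:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
```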
+ +## Expand Volume of Topology Solr Cluster + +Here, we are going to deploy a `Solr` topology using a supported version by `KubeDB` operator. Then we are going to apply `SolrOpsRequest` to expand its volume. + +### Prepare Solr Topology Cluster + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 24d +``` + +We can see from the output the `local-path` storage class has `ALLOWVOLUMEEXPANSION` field as false. So, this storage class supports volume expansion. We can use it. + +Now, we are going to deploy a `Solr` combined cluster with version `9.4.1`. + +### Deploy Solr + +In this section, we are going to deploy a Solr topology cluster for broker and controller with 1GB volume. Then, in the next section we will expand its volume to 2GB using `SolrOpsRequest` CRD. Below is the YAML of the `Solr` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Solr +metadata: + name: solr-cluster + namespace: demo +spec: + version: 9.4.1 + zookeeperRef: + name: zoo + namespace: demo + topology: + overseer: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + coordinator: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Solr` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/volume-expansion/topology.yaml +solr.kubedb.com/solr-cluster created +``` + +Now, wait until `solr-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get sl -n demo +NAME TYPE VERSION STATUS AGE +solr-cluster kubedb.com/v1alpha2 9.4.1 Ready 41m + +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get petset -n demo solr-cluster-overseer -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get petset -n demo solr-cluster-coordinator -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-31538e3e-2d02-4ca0-9b76-5da7c63cea70 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-data-0 longhorn 44m +pvc-8c5b14ab-3da4-4492-abf4-edd7faa265ef 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-overseer-0 longhorn 44m +pvc-95522f35-52bd-4978-b66f-1979cec34982 1Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-coordinator-0 longhorn 44m +``` + +You can see the petsets have 1GB storage, and the capacity of all the persistent volumes are also 1GB. + +We are now ready to apply the `SolrOpsRequest` CR to expand the volume of this database. + +### Volume Expansion + +Here, we are going to expand the volume of the Solr topology cluster. + +#### Create SolrOpsRequest + +In order to expand the volume of the database, we have to create a `SolrOpsRequest` CR with our desired volume size. 
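The ops request below uses `spec.volumeExpansion.mode: Offline`, so the operator deletes and recreates the Solr pods while the PVCs are being patched. If you want to follow that process from a separate terminal, a watch along the following lines can help; the label selector is an assumption about the labels KubeDB sets on these objects, so verify it first (for example with `kubectl get pvc -n demo --show-labels`):

```bash
# Watch pods and PVCs of the topology cluster while the offline expansion runs.
kubectl get pods,pvc -n demo -l app.kubernetes.io/instance=solr-cluster -w
```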
Below is the YAML of the `SolrOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: SolrOpsRequest +metadata: + name: sl-volume-exp-topology + namespace: demo +spec: + apply: IfReady + databaseRef: + name: solr-cluster + type: VolumeExpansion + volumeExpansion: + mode: Offline + data: 11Gi + overseer : 11Gi + +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `Solr-prod`. +- `spec.type` specifies that we are performing `VolumeExpansion` on our database. +- `spec.volumeExpansion.data` specifies the desired volume size for data node. +- `spec.volumeExpansion.overseer` specifies the desired volume size for overseer node. +- `spec.volumeExpansion.coordinator` specifies the desired volume size for coordinator node. + +> If you want to expand the volume of only one node, you can specify the desired volume size for that node only. + +Let's create the `SolrOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/solr/volume-expansion/solr-volume-expansion-topology.yaml +solropsrequest.ops.kubedb.com/sl-volume-exp-topology created +``` + +#### Verify Solr Topology volume expanded successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `Solr` object and related `PetSets` and `Persistent Volumes`. + +Let's wait for `SolrOpsRequest` to be `Successful`. Run the following command to watch `SolrOpsRequest` CR, + +```bash +$ kubectl get solropsrequest -n demo +NAME TYPE STATUS AGE +sl-volume-exp-topology VolumeExpansion Successful 3m1s +``` + +We can see from the above output that the `SolrOpsRequest` has succeeded. If we describe the `SolrOpsRequest` we will get an overview of the steps that were followed to expand the volume of Solr. + +```bash +$ kubectl describe slops -n demo sl-volume-exp-topology +Name: sl-volume-exp-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: SolrOpsRequest +Metadata: + Creation Timestamp: 2024-11-12T06:38:29Z + Generation: 1 + Resource Version: 2444852 + UID: 2ea88297-45d1-4f48-b21a-8ede43d3ee69 +Spec: + Apply: IfReady + Database Ref: + Name: solr-cluster + Type: VolumeExpansion + Volume Expansion: + Data: 11Gi + Mode: Offline + Overseer: 11Gi +Status: + Conditions: + Last Transition Time: 2024-11-12T06:38:29Z + Message: Solr ops-request has started to expand volume of solr nodes. 
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-11-12T06:39:03Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-11-12T06:38:43Z + Message: get petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetset + Last Transition Time: 2024-11-12T06:38:43Z + Message: delete petset; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePetset + Last Transition Time: 2024-11-12T06:40:13Z + Message: successfully updated data node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionDataNode + Status: True + Type: VolumeExpansionDataNode + Last Transition Time: 2024-11-12T06:40:53Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-11-12T06:39:08Z + Message: patch ops request; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsRequest + Last Transition Time: 2024-11-12T06:39:08Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2024-11-12T06:39:43Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-11-12T06:39:43Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2024-11-12T06:41:13Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-11-12T06:40:03Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-11-12T06:41:38Z + Message: successfully updated overseer node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionOverseerNode + Status: True + Type: VolumeExpansionOverseerNode + Last Transition Time: 2024-11-12T06:41:18Z + Message: running solr; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningSolr + Last Transition Time: 2024-11-12T06:41:44Z + Message: successfully reconciled the Solr resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-12T06:41:49Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-11-12T06:41:49Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-11-12T06:41:49Z + Message: Successfully completed volumeExpansion for Solr + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m39s KubeDB Ops-manager Operator Start processing for SolrOpsRequest: demo/sl-volume-exp-topology + Normal Starting 3m39s KubeDB Ops-manager Operator Pausing Solr databse: demo/solr-cluster + Normal Successful 3m39s KubeDB Ops-manager Operator Successfully paused Solr database: demo/solr-cluster for SolrOpsRequest: sl-volume-exp-topology + Warning get petset; ConditionStatus:True 3m25s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning delete petset; ConditionStatus:True 3m25s KubeDB Ops-manager Operator 
delete petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 3m20s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 3m15s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Warning delete petset; ConditionStatus:True 3m15s KubeDB Ops-manager Operator delete petset; ConditionStatus:True + Warning get petset; ConditionStatus:True 3m10s KubeDB Ops-manager Operator get petset; ConditionStatus:True + Normal OrphanPetSetPods 3m5s KubeDB Ops-manager Operator successfully deleted the petSets with orphan propagation policy + Warning get pod; ConditionStatus:True 3m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 3m KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 3m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 2m55s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m25s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m25s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 2m25s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 2m25s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 2m20s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m20s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m15s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m15s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m10s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 2m5s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 2m5s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 2m5s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 2m5s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 2m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal VolumeExpansionDataNode 115s KubeDB Ops-manager Operator successfully updated data node PVC sizes + Warning get pod; ConditionStatus:True 110s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 110s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning delete pod; ConditionStatus:True 110s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:False 105s KubeDB Ops-manager Operator get pod; ConditionStatus:False + Warning get pod; ConditionStatus:True 75s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 75s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 
75s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 75s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 70s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 70s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 65s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 65s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 60s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 60s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 55s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 55s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 55s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 55s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch ops request; ConditionStatus:True 55s KubeDB Ops-manager Operator patch ops request; ConditionStatus:True + Warning get pod; ConditionStatus:True 50s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning running solr; ConditionStatus:False 50s KubeDB Ops-manager Operator running solr; ConditionStatus:False + Warning get pod; ConditionStatus:True 45s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 35s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal VolumeExpansionOverseerNode 30s KubeDB Ops-manager Operator successfully updated overseer node PVC sizes + Normal UpdatePetSets 24s KubeDB Ops-manager Operator successfully reconciled the Solr resources + Warning get pet set; ConditionStatus:True 19s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 19s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 19s KubeDB Ops-manager Operator PetSet is recreated + Normal Starting 19s KubeDB Ops-manager Operator Resuming Solr database: demo/solr-cluster + Normal Successful 19s KubeDB Ops-manager Operator Successfully resumed Solr database: demo/solr-cluster for SolrOpsRequest: sl-volume-exp-topology +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo solr-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"11Gi" +$ kubectl get petset -n demo solr-cluster-overseer -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"11Gi" +$ kubectl get petset -n demo solr-cluster-coordinator -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-31538e3e-2d02-4ca0-9b76-5da7c63cea70 11Gi RWO Delete Bound demo/solr-cluster-data-solr-cluster-data-0 longhorn 52m +pvc-8c5b14ab-3da4-4492-abf4-edd7faa265ef 11Gi RWO Delete Bound 
demo/solr-cluster-data-solr-cluster-overseer-0      longhorn                        52m
pvc-95522f35-52bd-4978-b66f-1979cec34982   1Gi        RWO            Delete           Bound    demo/solr-cluster-data-solr-cluster-coordinator-0   longhorn                        52m
```

The above output verifies that we have successfully expanded the volume of the Solr cluster.

## Cleaning Up

To clean up the Kubernetes resources created by this tutorial, run:

```bash
kubectl delete solropsrequest -n demo sl-volume-exp-topology
kubectl delete sl -n demo solr-cluster
kubectl delete ns demo
```

## Next Steps

- Detail concepts of [Solr object](/docs/guides/solr/concepts/solr.md).
- Different Solr topology clustering modes [here](/docs/guides/solr/clustering/topology_cluster.md).
- Monitor your Solr database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/solr/monitoring/prometheus-operator.md).
- Monitor your Solr database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/solr/monitoring/prometheus-builtin.md).
- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).

diff --git a/docs/images/day-2-operation/solr/compute-autoscaling.svg b/docs/images/day-2-operation/solr/compute-autoscaling.svg
new file mode 100644
index 0000000000..66440457be
--- /dev/null
+++ b/docs/images/day-2-operation/solr/compute-autoscaling.svg
@@ -0,0 +1,209 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/horizontal-scaling.svg b/docs/images/day-2-operation/solr/horizontal-scaling.svg
new file mode 100644
index 0000000000..99c0a7a053
--- /dev/null
+++ b/docs/images/day-2-operation/solr/horizontal-scaling.svg
@@ -0,0 +1,146 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/reconfigure-tls.svg b/docs/images/day-2-operation/solr/reconfigure-tls.svg
new file mode 100644
index 0000000000..82c14b8fe1
--- /dev/null
+++ b/docs/images/day-2-operation/solr/reconfigure-tls.svg
@@ -0,0 +1,146 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/reconfigure.svg b/docs/images/day-2-operation/solr/reconfigure.svg
new file mode 100644
index 0000000000..541a85e614
--- /dev/null
+++ b/docs/images/day-2-operation/solr/reconfigure.svg
@@ -0,0 +1,152 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/storage-autoscaling.svg
b/docs/images/day-2-operation/solr/storage-autoscaling.svg
new file mode 100644
index 0000000000..23b2be82ce
--- /dev/null
+++ b/docs/images/day-2-operation/solr/storage-autoscaling.svg
@@ -0,0 +1,236 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/update-version.svg b/docs/images/day-2-operation/solr/update-version.svg
new file mode 100644
index 0000000000..f10910e452
--- /dev/null
+++ b/docs/images/day-2-operation/solr/update-version.svg
@@ -0,0 +1,151 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/vertical-scaling.svg b/docs/images/day-2-operation/solr/vertical-scaling.svg
new file mode 100644
index 0000000000..6b2f2f3ddf
--- /dev/null
+++ b/docs/images/day-2-operation/solr/vertical-scaling.svg
@@ -0,0 +1,151 @@
[SVG markup omitted]
diff --git a/docs/images/day-2-operation/solr/volume-expansion.svg b/docs/images/day-2-operation/solr/volume-expansion.svg
new file mode 100644
index 0000000000..071360346b
--- /dev/null
+++ b/docs/images/day-2-operation/solr/volume-expansion.svg
@@ -0,0 +1,191 @@
[SVG markup omitted]
diff --git a/docs/images/solr/monitoring/solr-builtin-prom-target.png b/docs/images/solr/monitoring/solr-builtin-prom-target.png
new file mode 100644
index 0000000000..7b7662e7cb
Binary files /dev/null and b/docs/images/solr/monitoring/solr-builtin-prom-target.png differ
diff --git a/docs/images/solr/monitoring/solr-operator-prom-target.png b/docs/images/solr/monitoring/solr-operator-prom-target.png
new file mode 100644
index 0000000000..2f0a484909
Binary files /dev/null and b/docs/images/solr/monitoring/solr-operator-prom-target.png differ
diff --git a/docs/images/solr/tls.svg b/docs/images/solr/tls.svg
new file mode 100644
index 0000000000..4fb3e9ceaa
--- /dev/null
+++ b/docs/images/solr/tls.svg
@@ -0,0 +1,125 @@
[SVG markup omitted]