diff --git a/incubator/artifactory/README.md b/incubator/artifactory/README.md deleted file mode 100644 index f0e6f8549f70..000000000000 --- a/incubator/artifactory/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# JFrog Artifactory Helm Chart - -## Prerequisites Details - -* Artifactory Pro trial license [get one from here](https://www.jfrog.com/artifactory/free-trial/) - -## Todo - -* Implement Support of Reverse proxy for Docker Repo using Nginx -* Smarter upscaling/downscaling - -## Chart Details -This chart will do the following: - -* Deploy Artifactory-oss -* Deploy Artifactory-Pro - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```bash -$ helm install --name my-release incubator/artifactory -``` - -Note: By default it will run Artifactory-oss to run Artifactory-Pro uncomment image in value.yaml or use following command -```bash -$ helm install --name my-release --set image=docker.bintray.io/jfrog/artifactory-pro incubator/artifactory -``` - -## Deleting the Charts - -Deletion of the PetSet doesn't cascade to deleting associated Pods and PVCs. To delete them: - -``` - $ helm delete my-release -``` - -## Configuration - -The following tables lists the configurable parameters of the artifactory chart and their default values. - -| Parameter | Description | Default | -|---------------------------|-----------------------------------|----------------------------------------------------------| -| `Image` | Container image name | `docker.bintray.io/jfrog/artifactory-oss` | -| `ImageTag` | Container image tag | `5.2.0` | -| `ImagePullPolicy` | Container pull policy | `Always` | - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. - - -## Useful links -https://www.jfrog.com -https://www.jfrog.com/confluence/ diff --git a/incubator/artifactory/templates/NOTES.txt b/incubator/artifactory/templates/NOTES.txt deleted file mode 100644 index 87715abc13f8..000000000000 --- a/incubator/artifactory/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -Get the Artifactory URL to visit by running these commands in the same shell: -{{- if contains "NodePort" .Values.ServiceType }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT/ - -{{- else if contains "LoadBalancer" .Values.ServiceType }} -**** NOTE: It may take a few minutes for the LoadBalancer IP to be available. **** -**** You can watch the status of by running 'kubectl get svc -w {{ template "fullname" . }}' **** - export SERVICE_IP=$(kubectl get svc {{ template "fullname" . }} --namespace {{ .Release.Namespace }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.httpPort }}/ - -{{- else if contains "ClusterIP" .Values.ServiceType }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . 
}}" -o jsonpath="{.items[0].metadata.name}") - echo http://127.0.0.1:{{ .Values.httpPort }} - kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.httpPort }}:{{ .Values.httpPort }} - -{{- end }} - -Default credential for Artifactory: -user: admin -password: password diff --git a/incubator/artifactory/templates/_helpers.tpl b/incubator/artifactory/templates/_helpers.tpl deleted file mode 100644 index bdf219c3bd0b..000000000000 --- a/incubator/artifactory/templates/_helpers.tpl +++ /dev/null @@ -1,16 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{define "name"}}{{default "artifactory" .Values.nameOverride | trunc 24 }}{{end}} - -{{/* -Create a default fully qualified app name. - -We truncate at 24 chars because some Kubernetes name fields are limited to this -(by the DNS naming spec). -*/}} -{{define "fullname"}} -{{- $name := default "artifactory" .Values.nameOverride -}} -{{printf "%s-%s" .Release.Name $name | trunc 24 -}} -{{end}} diff --git a/incubator/artifactory/templates/deployment.yaml b/incubator/artifactory/templates/deployment.yaml deleted file mode 100644 index 7513a7f02c46..000000000000 --- a/incubator/artifactory/templates/deployment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: {{template "fullname" .}} - labels: - app: {{ template "fullname" . }} - heritage: "{{ .Release.Service }}" - release: "{{ .Release.Name }}" - chart: "{{.Chart.Name}}-{{.Chart.Version}}" -spec: - replicas: {{default 1 .Values.replicaCount}} - template: - metadata: - labels: - app: {{template "fullname" .}} - release: {{.Release.Name | quote }} - spec: - containers: - - name: {{ template "fullname" . }} - image: "{{ .Values.image}}:{{ .Values.imageTag}}" - imagePullPolicy: {{default "IfNotPresent" .Values.ImagePullPolicy}} - resources: -{{ toYaml .Values.resources | indent 10 }} - ports: - - containerPort: 8081 - name: http - volumeMounts: - - name: etc - mountPath: /var/opt/jfrog/artifactory/etc - - name: logs - mountPath: /var/opt/jfrog/artifactory/logs - - name: data - mountPath: /var/opt/jfrog/artifactory/data - volumes: - - name: data - - name: logs - - name: etc \ No newline at end of file diff --git a/incubator/artifactory/templates/svc.yaml b/incubator/artifactory/templates/svc.yaml deleted file mode 100644 index b211ebb5ccc9..000000000000 --- a/incubator/artifactory/templates/svc.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{template "fullname" .}} - labels: - heritage: {{ .Release.Service | quote }} - release: {{ .Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - app: {{template "fullname" .}} -spec: - ports: - - port: {{default 8081 .Values.httpPort}} - targetPort: 8081 - protocol: TCP - name: http - selector: - app: {{template "fullname" .}} - type: {{.Values.ServiceType}} \ No newline at end of file diff --git a/incubator/artifactory/values.yaml b/incubator/artifactory/values.yaml deleted file mode 100644 index 9fe4e54d353b..000000000000 --- a/incubator/artifactory/values.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Default values for Artifactory. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. 
-# name: value - -Name: artifactory -Component: "Artifactory" - -## Uncomment following line if you want to run Artifactory-Pro -#image: "docker.bintray.io/jfrog/artifactory-pro" -image: "docker.bintray.io/jfrog/artifactory-oss" -imageTag: "5.2.0" -imagePullPolicy: "Always" -replicaCount: 1 -httpPort: 8081 -## Kubernetes configuration -## For minikube, set this to NodePort, elsewhere use LoadBalancer -## -ServiceType: ClusterIP -resources: - requests: - memory: 2048Mi - cpu: 200m - -## Persist data to a persitent volume -persistence: - enabled: true - storageClass: generic - accessMode: ReadWriteOnce - size: 8Gi diff --git a/incubator/cassandra/Chart.yaml b/incubator/cassandra/Chart.yaml index 75ea05a6f995..941fad162998 100644 --- a/incubator/cassandra/Chart.yaml +++ b/incubator/cassandra/Chart.yaml @@ -1,5 +1,5 @@ name: cassandra -version: 0.1.0 +version: 0.1.1 description: Apache Cassandra is a free and open-source distributed database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. icon: https://upload.wikimedia.org/wikipedia/commons/5/5e/Cassandra_logo.svg keywords: diff --git a/incubator/cassandra/templates/cassandra-statefulset.yaml b/incubator/cassandra/templates/cassandra-statefulset.yaml index f4dc45e81682..326e852425d6 100644 --- a/incubator/cassandra/templates/cassandra-statefulset.yaml +++ b/incubator/cassandra/templates/cassandra-statefulset.yaml @@ -78,7 +78,7 @@ spec: fieldPath: status.podIP livenessProbe: exec: - command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+${POD_IP}\"" ] + command: [ "/bin/sh", "-c", "nodetool status" ] initialDelaySeconds: 90 periodSeconds: 30 readinessProbe: @@ -99,12 +99,12 @@ spec: containerPort: {{ default 9160 .Values.config.ports.thrift }} volumeMounts: - name: data - {{- if .Values.persistence.enabled }} mountPath: /var/lib/cassandra - {{- else }} - emptyDir: {} - {{- end }} - {{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.enabled }} + volumes: + - name: data + emptyDir: {} +{{- else }} volumeClaimTemplates: - metadata: name: data @@ -125,4 +125,4 @@ spec: resources: requests: storage: {{ .Values.persistence.size | quote }} - {{- end }} +{{- end }} diff --git a/incubator/elasticsearch/Chart.yaml b/incubator/elasticsearch/Chart.yaml index 2e751ad08dcb..f561acd8c044 100755 --- a/incubator/elasticsearch/Chart.yaml +++ b/incubator/elasticsearch/Chart.yaml @@ -1,11 +1,15 @@ name: elasticsearch home: https://www.elastic.co/products/elasticsearch -version: 0.1.4 +version: 0.1.6 description: Flexible and powerful open source, distributed real-time search and analytics engine. 
 icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
 sources:
   - https://www.elastic.co/products/elasticsearch
   - https://github.com/jetstack/elasticsearch-pet
+  - https://github.com/giantswarm/kubernetes-elastic-stack
+  - https://github.com/GoogleCloudPlatform/elasticsearch-docker
 maintainers:
   - name: Christian Simon
     email: christian@jetstack.io
+  - name: Michael Haselton
+    email: michael.haselton@gmail.com
diff --git a/incubator/elasticsearch/README.md b/incubator/elasticsearch/README.md
index f9af1b3b0ddb..ed4448a6b77b 100644
--- a/incubator/elasticsearch/README.md
+++ b/incubator/elasticsearch/README.md
@@ -7,14 +7,14 @@ elasticsearch and their
 
 ## Prerequisites Details
 
-* Kubernetes 1.3 with alpha APIs enabled
+* Kubernetes 1.5
 * PV dynamic provisioning support on the underlying infrastructure
 
-## PetSet Details
-* http://kubernetes.io/docs/user-guide/petset/
+## StatefulSets Details
+* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
 
-## PetSet Caveats
-* http://kubernetes.io/docs/user-guide/petset/#alpha-limitations
+## StatefulSets Caveats
+* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
 
 ## Todo
 
@@ -25,9 +25,9 @@
 ## Chart Details
 This chart will do the following:
 
-* Implemented a dynamically scalable elasticsearch cluster using Kubernetes PetSets/Deployments
+* Implements a dynamically scalable elasticsearch cluster using Kubernetes StatefulSets/Deployments
 * Multi-role deployment: master, client and data nodes
-* PetSet Supports scaling down without degrading the cluster
+* StatefulSets support scaling down without degrading the cluster
 
 ## Installing the Chart
 
@@ -51,33 +51,27 @@ $ kubectl delete pvcs -l release=my-release,type=data
 
 The following tables lists the configurable parameters of the elasticsearch chart and their default values.
 
-| Parameter | Description | Default |
-|---------------------------|-----------------------------------|----------------------------------------------------------|
-| `Image` | Container image name | `jetstack/elasticsearch-pet` |
-| `ImageTag` | Container image tag | `2.3.4` |
-| `ImagePullPolicy` | Container pull policy | `Always` |
-| `ClientReplicas` | Client node replicas (deployment) | `2` |
-| `ClientCpuRequests` | Client node requested cpu | `25m` |
-| `ClientMemoryRequests` | Client node requested memory | `256Mi` |
-| `ClientCpuLimits` | Client node requested cpu | `100m` |
-| `ClientMemoryLimits` | Client node requested memory | `512Mi` |
-| `ClientHeapSize` | Client node heap size | `128m` |
-| `MasterReplicas` | Master node replicas (deployment) | `2` |
-| `MasterCpuRequests` | Master node requested cpu | `25m` |
-| `MasterMemoryRequests` | Master node requested memory | `256Mi` |
-| `MasterCpuLimits` | Master node requested cpu | `100m` |
-| `MasterMemoryLimits` | Master node requested memory | `512Mi` |
-| `MasterHeapSize` | Master node heap size | `128m` |
-| `DataReplicas` | Data node replicas (petset) | `3` |
-| `DataCpuRequests` | Data node requested cpu | `250m` |
-| `DataMemoryRequests` | Data node requested memory | `2Gi` |
-| `DataCpuLimits` | Data node requested cpu | `1` |
-| `DataMemoryLimits` | Data node requested memory | `4Gi` |
-| `DataHeapSize` | Data node heap size | `1536m` |
-| `DataStorage` | Data persistent volume size | `30Gi` |
-| `DataStorageClass` | Data persistent volume Class | `anything` |
-| `DataStorageClassVersion` | Version of StorageClass | `alpha` |
-| `Component` | Selector Key | `elasticsearch` |
+| Parameter | Description | Default |
+| ------------------------------------ | --------------------------------------- | ----------------------------------- |
+| `image.repository` | Container image name | `jetstack/elasticsearch-pet` |
+| `image.tag` | Container image tag | `2.4.0` |
+| `image.pullPolicy` | Container pull policy | `Always` |
+| `client.name` | Client component name | `client` |
+| `client.replicas` | Client node replicas (deployment) | `2` |
+| `client.resources` | Client node resources requests & limits | `{} - cpu limit must be an integer` |
+| `client.heapSize` | Client node heap size | `128m` |
+| `client.serviceType` | Client service type | `ClusterIP` |
+| `master.name` | Master component name | `master` |
+| `master.replicas` | Master node replicas (deployment) | `2` |
+| `master.resources` | Master node resources requests & limits | `{} - cpu limit must be an integer` |
+| `master.heapSize` | Master node heap size | `128m` |
+| `data.name` | Data component name | `data` |
+| `data.replicas` | Data node replicas (statefulset) | `3` |
+| `data.resources` | Data node resources requests & limits | `{} - cpu limit must be an integer` |
+| `data.heapSize` | Data node heap size | `1536m` |
+| `data.storage` | Data persistent volume size | `30Gi` |
+| `data.storageClass` | Data persistent volume Class | `nil` |
+| `data.terminationGracePeriodSeconds` | Data termination grace period (seconds) | `3600` |
 
 Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
 
@@ -102,7 +96,7 @@ would degrade performance heaviliy.
The issue is tracked in ## Select right storage class for SSD volumes -### GCE + Kubernetes 1.4 +### GCE + Kubernetes 1.5 Create StorageClass for SSD-PD @@ -117,9 +111,8 @@ parameters: type: pd-ssd EOF ``` -Create cluster with Storage class `ssd` on Kubernetes 1.4+ +Create cluster with Storage class `ssd` on Kubernetes 1.5+ ``` -$ helm install incubator/elasticsearch --name my-release --set DataStorageClass=ssd,DataStorageClassVersion=beta - +$ helm install incubator/elasticsearch --name my-release --set data.storageClass=ssd,data.storage=100Gi ``` diff --git a/incubator/elasticsearch/templates/NOTES.txt b/incubator/elasticsearch/templates/NOTES.txt new file mode 100644 index 000000000000..de5071f85d4e --- /dev/null +++ b/incubator/elasticsearch/templates/NOTES.txt @@ -0,0 +1,31 @@ +The elasticsearch cluster has been installed. + +Elasticsearch can be accessed: + + * Within your cluster, at the following DNS name at port 9200: + + {{ template "client.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + + * From outside the cluster, run these commands in the same shell: + {{- if contains "NodePort" .Values.client.serviceType }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "client.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.client.serviceType }} + + WARNING: You have likely exposed your Elasticsearch cluster direct to the internet. + Elasticsearch does not implement any security for public facing clusters by default. + As a minimum level of security; switch to ClusterIP/NodePort and place an Nginx gateway infront of the cluster in order to lock down access to dangerous HTTP endpoints and verbs. + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "client.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "client.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:9200 + {{- else if contains "ClusterIP" .Values.client.serviceType }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . }},component={{ .Values.client.name }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9200 to use Elasticsearch" + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 9200:9200 + {{- end }} diff --git a/incubator/elasticsearch/templates/_helpers.tpl b/incubator/elasticsearch/templates/_helpers.tpl new file mode 100644 index 000000000000..172629fa06be --- /dev/null +++ b/incubator/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified client name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "client.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.client.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified data name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "data.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.data.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified master name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.master.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/incubator/elasticsearch/templates/elasticsearch-client-deployment.yaml b/incubator/elasticsearch/templates/elasticsearch-client-deployment.yaml index 7ed58990c933..3edc47f0bcea 100644 --- a/incubator/elasticsearch/templates/elasticsearch-client-deployment.yaml +++ b/incubator/elasticsearch/templates/elasticsearch-client-deployment.yaml @@ -1,30 +1,44 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1beta1 kind: Deployment metadata: - name: "{{ printf "%s-client-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: client + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.client.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "client.fullname" . }} spec: - replicas: {{default 2 .Values.ClientReplicas }} + replicas: {{ .Values.client.replicas }} template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: client + app: {{ template "name" . }} + component: "{{ .Values.client.name }}" + release: {{ .Release.Name }} + annotations: + # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "sysctl", + "image": "busybox", + "imagePullPolicy": "Always", + "command": ["sysctl", "-w", "vm.max_map_count=262144"], + "securityContext": { + "privileged": true + } + } + ]' spec: - serviceAccountName: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" + serviceAccountName: {{ template "fullname" . }} containers: - name: elasticsearch env: - name: SERVICE - value: "{{ printf "%s-cluster-%s" .Release.Name .Values.Name | trunc 24 }}" + value: {{ template "master.fullname" . 
}} + - name: KUBERNETES_MASTER + value: kubernetes.default.svc.cluster.local - name: KUBERNETES_NAMESPACE valueFrom: fieldRef: @@ -33,30 +47,34 @@ spec: value: "false" - name: NODE_MASTER value: "false" - - name: ES_HEAP_SIZE - value: "{{.Values.ClientHeapSize}}" + - name: PROCESSORS + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: ES_JAVA_OPTS + value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.client.heapSize }} -Xmx{{ .Values.client.heapSize }}" resources: - requests: - cpu: "{{.Values.ClientCpuRequests}}" - memory: "{{.Values.ClientMemoryRequests}}" - limits: - cpu: "{{.Values.ClientCpuLimits}}" - memory: "{{.Values.ClientMemoryLimits}}" - livenessProbe: - httpGet: - path: / - port: 9200 - initialDelaySeconds: 30 - timeoutSeconds: 1 +{{ toYaml .Values.client.resources | indent 12 }} readinessProbe: httpGet: - path: / + path: /_cluster/health?local=true port: 9200 - timeoutSeconds: 5 - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + initialDelaySeconds: 5 + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ default "" .Values.image.pullPolicy | quote }} ports: - containerPort: 9200 name: http - containerPort: 9300 name: transport + volumeMounts: + - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + - mountPath: /usr/share/elasticsearch/config/logging.yml + name: config + subPath: logging.yml + volumes: + - name: config + configMap: + name: {{ template "fullname" . }} diff --git a/incubator/elasticsearch/templates/elasticsearch-client-svc.yaml b/incubator/elasticsearch/templates/elasticsearch-client-svc.yaml new file mode 100644 index 000000000000..32f2095efad8 --- /dev/null +++ b/incubator/elasticsearch/templates/elasticsearch-client-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.client.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "client.fullname" . }} +spec: + ports: + - port: 9200 + targetPort: http + selector: + app: {{ template "name" . }} + component: "{{ .Values.client.name }}" + release: {{ .Release.Name }} + type: {{ .Values.client.serviceType }} diff --git a/incubator/elasticsearch/templates/elasticsearch-cluster-svc.yaml b/incubator/elasticsearch/templates/elasticsearch-cluster-svc.yaml deleted file mode 100644 index afb82c964006..000000000000 --- a/incubator/elasticsearch/templates/elasticsearch-cluster-svc.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "{{ printf "%s-cluster-%s" .Release.Name .Values.Name | trunc 24 }}" - labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" -spec: - clusterIP: None - ports: - - port: 9300 - targetPort: 9300 - selector: - component: "{{.Release.Name}}-{{.Values.Component}}" - diff --git a/incubator/elasticsearch/templates/elasticsearch-configmap.yaml b/incubator/elasticsearch/templates/elasticsearch-configmap.yaml new file mode 100644 index 000000000000..8a28611e7f16 --- /dev/null +++ b/incubator/elasticsearch/templates/elasticsearch-configmap.yaml @@ -0,0 +1,111 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + elasticsearch.yml: |- + node.data: ${NODE_DATA:true} + node.master: ${NODE_MASTER:true} + node.name: ${HOSTNAME} + + # see https://github.com/kubernetes/kubernetes/issues/3595 + bootstrap.mlockall: ${BOOTSTRAP_MLOCKALL:false} + + network.host: 0.0.0.0 + + cloud: + kubernetes: + service: ${SERVICE} + namespace: ${KUBERNETES_NAMESPACE} + + discovery: + type: kubernetes + zen: + minimum_master_nodes: 2 + + # see https://github.com/elastic/elasticsearch-definitive-guide/pull/679 + processors: ${PROCESSORS:} + + # avoid split-brain w/ a minimum consensus of two masters plus a data node + gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:2} + gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:1} + gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m} + gateway.recover_after_master_nodes: ${RECOVER_AFTER_MASTER_NODES:2} + gateway.recover_after_data_nodes: ${RECOVER_AFTER_DATA_NODES:1} + logging.yml: |- + # you can override this using by setting a system property, for example -Des.logger.level=DEBUG + es.logger.level: INFO + rootLogger: ${es.logger.level}, console + logger: + # log action execution errors for easier debugging + action: DEBUG + # reduce the logging for aws, too much is logged under the default INFO + com.amazonaws: WARN + + appender: + console: + type: console + layout: + type: consolePattern + conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + pre-stop-hook.sh: |- + #!/bin/bash + set -e + + SERVICE_ACCOUNT_PATH=/var/run/secrets/kubernetes.io/serviceaccount + KUBE_TOKEN=$(<${SERVICE_ACCOUNT_PATH}/token) + KUBE_NAMESPACE=$(<${SERVICE_ACCOUNT_PATH}/namespace) + + STATEFULSET_NAME=$(echo "${HOSTNAME}" | sed 's/-[0-9]*$//g') + INSTANCE_ID=$(echo "${HOSTNAME}" | grep -o '[0-9]*$') + + echo "Prepare stopping of Pet ${KUBE_NAMESPACE}/${HOSTNAME} of StatefulSet ${KUBE_NAMESPACE}/${STATEFULSET_NAME} instance_id ${INSTANCE_ID}" + + INSTANCES_DESIRED=$(curl -s \ + --cacert ${SERVICE_ACCOUNT_PATH}/ca.crt \ + -H "Authorization: Bearer $KUBE_TOKEN" \ + "https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_PORT_443_TCP_PORT}/apis/apps/v1beta1/namespaces/${KUBE_NAMESPACE}/statefulsets/${STATEFULSET_NAME}/status" | jq -r '.spec.replicas') + + echo "Desired instance count is ${INSTANCES_DESIRED}" + + if [ "${INSTANCE_ID}" -lt "${INSTANCES_DESIRED}" ]; then + echo "No data migration needed" + exit 0 + fi + + echo "Prepare to migrate data of the node" + + NODE_STATS=$(curl -s -XGET 'http://localhost:9200/_nodes/stats') + NODE_IP=$(echo "${NODE_STATS}" | jq -r ".nodes[] | select(.name==\"${HOSTNAME}\") | .host") + + echo "Move all data from node ${NODE_IP}" + + curl -s -XPUT localhost:9200/_cluster/settings -d "{ + \"transient\" :{ + \"cluster.routing.allocation.exclude._ip\" : \"${NODE_IP}\" + } + }" + echo + + echo "Wait for node to become empty" + DOC_COUNT=$(echo "${NODE_STATS}" | jq ".nodes[] | select(.name==\"${HOSTNAME}\") | .indices.docs.count") + while [ "${DOC_COUNT}" -gt 0 ]; do + NODE_STATS=$(curl -s -XGET 'http://localhost:9200/_nodes/stats') + DOC_COUNT=$(echo "${NODE_STATS}" | jq -r ".nodes[] | select(.name==\"${HOSTNAME}\") | .indices.docs.count") + echo "Node contains ${DOC_COUNT} documents" + sleep 1 + done + + curl -s -XPUT localhost:9200/_cluster/settings -d "{ + \"transient\" :{ + \"cluster.routing.allocation.exclude._ip\" : \"\" + } + }" + echo + + echo "Node clear to shutdown" diff --git 
a/incubator/elasticsearch/templates/elasticsearch-data-petset.yaml b/incubator/elasticsearch/templates/elasticsearch-data-petset.yaml deleted file mode 100644 index cd4746c41815..000000000000 --- a/incubator/elasticsearch/templates/elasticsearch-data-petset.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: apps/v1alpha1 -kind: PetSet -metadata: - name: "{{ printf "%s-data-%s" .Release.Name .Values.Name | trunc 24 }}" - labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: data -spec: - serviceName: "{{ printf "%s-data-%s" .Release.Name .Values.Name | trunc 24 }}" - replicas: {{default 3 .Values.DataReplicas }} - template: - metadata: - labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: data - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - serviceAccountName: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" - containers: - - name: elasticsearch - env: - - name: SERVICE - value: "{{ printf "%s-cluster-%s" .Release.Name .Values.Name | trunc 24 }}" - - name: KUBERNETES_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NODE_MASTER - value: "false" - - name: ES_HEAP_SIZE - value: "{{.Values.DataHeapSize}}" - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" - ports: - - containerPort: 9300 - name: transport - resources: - requests: - cpu: "{{.Values.DataCpuRequests}}" - memory: "{{.Values.DataMemoryRequests}}" - limits: - cpu: "{{.Values.DataCpuLimits}}" - memory: "{{.Values.DataMemoryLimits}}" - livenessProbe: - httpGet: - path: / - port: 9200 - initialDelaySeconds: 30 - timeoutSeconds: 1 - readinessProbe: - httpGet: - path: / - port: 9200 - timeoutSeconds: 5 - volumeMounts: - - mountPath: /usr/share/elasticsearch/data - name: elasticsearch-data - lifecycle: - preStop: - exec: - command: ["/bin/bash","/pre-stop-hook.sh"] - volumeClaimTemplates: - - metadata: - name: elasticsearch-data - annotations: - volume.{{ .Values.DataStorageClassVersion }}.kubernetes.io/storage-class: "{{ .Values.DataStorageClass }}" - spec: - accessModes: [ ReadWriteOnce ] - resources: - requests: - storage: "{{.Values.DataStorage}}" diff --git a/incubator/elasticsearch/templates/elasticsearch-data-statefulset.yaml b/incubator/elasticsearch/templates/elasticsearch-data-statefulset.yaml new file mode 100644 index 000000000000..30aaba2c1381 --- /dev/null +++ b/incubator/elasticsearch/templates/elasticsearch-data-statefulset.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.data.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "data.fullname" . }} +spec: + serviceName: {{ template "data.fullname" . }} + replicas: {{ .Values.data.replicas }} + template: + metadata: + labels: + app: {{ template "name" . 
}} + component: "{{ .Values.data.name }}" + release: {{ .Release.Name }} + annotations: + pod.alpha.kubernetes.io/initialized: "true" + # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "sysctl", + "image": "busybox", + "imagePullPolicy": "Always", + "command": ["sysctl", "-w", "vm.max_map_count=262144"], + "securityContext": { + "privileged": true + } + } + ]' + spec: + serviceAccountName: {{ template "fullname" . }} + containers: + - name: elasticsearch + env: + - name: SERVICE + value: {{ template "master.fullname" . }} + - name: KUBERNETES_MASTER + value: kubernetes.default.svc.cluster.local + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_MASTER + value: "false" + - name: PROCESSORS + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: ES_JAVA_OPTS + value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.data.heapSize }} -Xmx{{ .Values.data.heapSize }}" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ default "" .Values.image.pullPolicy | quote }} + ports: + - containerPort: 9300 + name: transport + resources: +{{ toYaml .Values.data.resources | indent 12 }} + readinessProbe: + httpGet: + path: /_cluster/health?local=true + port: 9200 + initialDelaySeconds: 5 + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: data + - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + - mountPath: /usr/share/elasticsearch/config/logging.yml + name: config + subPath: logging.yml + - name: config + mountPath: /pre-stop-hook.sh + subPath: pre-stop-hook.sh + lifecycle: + preStop: + exec: + command: ["/bin/bash","/pre-stop-hook.sh"] + terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }} + volumes: + - name: config + configMap: + name: {{ template "fullname" . 
}} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ ReadWriteOnce ] + {{- if .Values.data.storageClass }} + storageClassName: "{{ .Values.data.storageClass }}" + {{- end }} + resources: + requests: + storage: "{{ .Values.data.storage }}" diff --git a/incubator/elasticsearch/templates/elasticsearch-data-svc.yaml b/incubator/elasticsearch/templates/elasticsearch-data-svc.yaml deleted file mode 100644 index 47507f25e3d5..000000000000 --- a/incubator/elasticsearch/templates/elasticsearch-data-svc.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - name: "{{ printf "%s-data-%s" .Release.Name .Values.Name | trunc 24 }}" - labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" -spec: - clusterIP: None - ports: - - port: 9300 - targetPort: 9300 - selector: - component: "{{.Release.Name}}-{{.Values.Component}}" - type: data diff --git a/incubator/elasticsearch/templates/elasticsearch-master-deployment.yaml b/incubator/elasticsearch/templates/elasticsearch-master-deployment.yaml index e342b54427e9..663325a287a5 100644 --- a/incubator/elasticsearch/templates/elasticsearch-master-deployment.yaml +++ b/incubator/elasticsearch/templates/elasticsearch-master-deployment.yaml @@ -1,58 +1,76 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1beta1 kind: Deployment metadata: - name: "{{ printf "%s-master-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: master + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "master.fullname" . }} spec: - replicas: {{default 2 .Values.MasterReplicas }} + replicas: {{ .Values.master.replicas }} template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: master + app: {{ template "name" . }} + component: "{{ .Values.master.name }}" + release: {{ .Release.Name }} + annotations: + # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall + pod.alpha.kubernetes.io/init-containers: '[ + { + "name": "sysctl", + "image": "busybox", + "imagePullPolicy": "Always", + "command": ["sysctl", "-w", "vm.max_map_count=262144"], + "securityContext": { + "privileged": true + } + } + ]' spec: - serviceAccountName: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" + serviceAccountName: {{ template "fullname" . }} containers: - name: elasticsearch env: - name: SERVICE - value: "{{ printf "%s-cluster-%s" .Release.Name .Values.Name | trunc 24 }}" + value: {{ template "master.fullname" . 
}} + - name: KUBERNETES_MASTER + value: kubernetes.default.svc.cluster.local - name: KUBERNETES_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: NODE_DATA value: "false" - - name: ES_HEAP_SIZE - value: "{{.Values.MasterHeapSize}}" + - name: PROCESSORS + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: ES_JAVA_OPTS + value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.master.heapSize }} -Xmx{{ .Values.master.heapSize }}" resources: - requests: - cpu: "{{.Values.MasterCpuRequests}}" - memory: "{{.Values.MasterMemoryRequests}}" - limits: - cpu: "{{.Values.MasterCpuLimits}}" - memory: "{{.Values.MasterMemoryLimits}}" - livenessProbe: - httpGet: - path: / - port: 9200 - initialDelaySeconds: 30 - timeoutSeconds: 1 +{{ toYaml .Values.master.resources | indent 12 }} readinessProbe: httpGet: - path: / + path: /_cluster/health?local=true port: 9200 - timeoutSeconds: 5 - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + initialDelaySeconds: 5 + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ default "" .Values.image.pullPolicy | quote }} ports: - containerPort: 9300 name: transport + volumeMounts: + - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + - mountPath: /usr/share/elasticsearch/config/logging.yml + name: config + subPath: logging.yml + volumes: + - name: config + configMap: + name: {{ template "fullname" . }} diff --git a/incubator/elasticsearch/templates/elasticsearch-master-svc.yaml b/incubator/elasticsearch/templates/elasticsearch-master-svc.yaml new file mode 100644 index 000000000000..7d3f484231ea --- /dev/null +++ b/incubator/elasticsearch/templates/elasticsearch-master-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "master.fullname" . }} +spec: + clusterIP: None + ports: + - port: 9300 + targetPort: 9300 + selector: + app: {{ template "name" . }} + component: "{{ .Values.master.name }}" + release: {{ .Release.Name }} diff --git a/incubator/elasticsearch/templates/elasticsearch-service-account.yaml b/incubator/elasticsearch/templates/elasticsearch-service-account.yaml index 13a38699354f..b187517fe890 100644 --- a/incubator/elasticsearch/templates/elasticsearch-service-account.yaml +++ b/incubator/elasticsearch/templates/elasticsearch-service-account.yaml @@ -1,10 +1,9 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" - type: master + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fullname" . 
}} diff --git a/incubator/elasticsearch/templates/elasticsearch-svc.yaml b/incubator/elasticsearch/templates/elasticsearch-svc.yaml deleted file mode 100644 index ff176da15412..000000000000 --- a/incubator/elasticsearch/templates/elasticsearch-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" - labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" -spec: - ports: - - port: 9200 - targetPort: http - selector: - component: "{{.Release.Name}}-{{.Values.Component}}" - type: client diff --git a/incubator/elasticsearch/values.yaml b/incubator/elasticsearch/values.yaml index 9567ff029bf4..1fda84a7f204 100644 --- a/incubator/elasticsearch/values.yaml +++ b/incubator/elasticsearch/values.yaml @@ -1,33 +1,48 @@ # Default values for elasticsearch. # This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. -# name: value +# Declare variables to be passed into your templates. -Name: es -PeerPort: 2380 -ClientPort: 2379 -Component: "elasticsearch" -Image: "jetstack/elasticsearch-pet" -ImageTag: "2.3.5" -ImagePullPolicy: "Always" -ClientReplicas: 2 -ClientCpuRequests: "25m" -ClientMemoryRequests: "256Mi" -ClientCpuLimits: "100m" -ClientMemoryLimits: "512Mi" -ClientHeapSize: "128m" -MasterReplicas: 2 -MasterCpuRequests: "25m" -MasterMemoryRequests: "256Mi" -MasterCpuLimits: "100m" -MasterMemoryLimits: "512Mi" -MasterHeapSize: "128m" -DataReplicas: 3 -DataCpuRequests: "250m" -DataMemoryRequests: "2Gi" -DataCpuLimits: "1" -DataMemoryLimits: "4Gi" -DataHeapSize: "1536m" -DataStorage: "30Gi" -DataStorageClass: "anything" -DataStorageClassVersion: "alpha" +image: + repository: "jetstack/elasticsearch-pet" + tag: "2.4.0" + pullPolicy: "Always" + +client: + name: client + replicas: 2 + serviceType: ClusterIP + heapSize: "128m" + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "25m" + memory: "256Mi" + +master: + name: master + replicas: 2 + heapSize: "128m" + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "25m" + memory: "256Mi" + +data: + name: data + replicas: 3 + heapSize: "1536m" + storage: "30Gi" + # storageClass: "ssd" + terminationGracePeriodSeconds: 3600 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "25m" + memory: "256Mi" diff --git a/incubator/etcd/templates/etcd-statefulset.yaml b/incubator/etcd/templates/etcd-statefulset.yaml index 7b311198ff05..eb928dc774a6 100644 --- a/incubator/etcd/templates/etcd-statefulset.yaml +++ b/incubator/etcd/templates/etcd-statefulset.yaml @@ -6,59 +6,59 @@ metadata: metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: ports: - - port: {{.Values.PeerPort}} + - port: {{ .Values.PeerPort }} name: etcd-server - - port: {{.Values.ClientPort}} + - port: {{ .Values.ClientPort }} name: etcd-client clusterIP: None selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" --- apiVersion: apps/v1beta1 kind: StatefulSet metadata: name: 
"{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: serviceName: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" - replicas: {{default 3 .Values.Replicas}} + replicas: {{ default 3 .Values.Replicas }} template: metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: pod.alpha.kubernetes.io/initialized: "true" spec: containers: - name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + image: "{{ .Values.Image }}:{{ .Values.ImageTag }}" + imagePullPolicy: "{{ .Values.ImagePullPolicy }}" ports: - - containerPort: {{.Values.PeerPort}} + - containerPort: {{ .Values.PeerPort }} name: peer - - containerPort: {{.Values.ClientPort}} + - containerPort: {{ .Values.ClientPort }} name: client resources: requests: - cpu: "{{.Values.Cpu}}" - memory: "{{.Values.Memory}}" + cpu: "{{ .Values.Cpu }}" + memory: "{{ .Values.Memory }}" env: - name: INITIAL_CLUSTER_SIZE - value: {{default 3 .Values.Replicas | quote }} + value: {{ default 3 .Values.Replicas | quote }} - name: SET_NAME value: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 24 }}" volumeMounts: @@ -204,4 +204,4 @@ spec: resources: requests: # upstream recommended max is 700M - storage: "{{.Values.Storage}}" + storage: "{{ .Values.Storage }}" diff --git a/incubator/gogs/templates/ingress.yaml b/incubator/gogs/templates/ingress.yaml index 39e6735f573a..224dcc59b50b 100644 --- a/incubator/gogs/templates/ingress.yaml +++ b/incubator/gogs/templates/ingress.yaml @@ -25,4 +25,8 @@ spec: serviceName: {{ printf "%s-%s" $releaseName $serviceName | trunc 63 | trimSuffix "-" }} servicePort: {{ $httpPort }} {{- end -}} +{{- if .Values.service.ingress.tls }} + tls: +{{ toYaml .Values.service.ingress.tls | indent 4 }} +{{- end -}} {{- end -}} diff --git a/incubator/istio/templates/addons-grafana-deployment.yaml b/incubator/istio/templates/addons-grafana-deployment.yaml index c5a8ce61e441..ad0980082ce6 100644 --- a/incubator/istio/templates/addons-grafana-deployment.yaml +++ b/incubator/istio/templates/addons-grafana-deployment.yaml @@ -42,4 +42,4 @@ spec: volumes: - name: grafana-data emptyDir: {} -{{end}} \ No newline at end of file +{{ end }} diff --git a/incubator/istio/templates/addons-grafana-svc.yaml b/incubator/istio/templates/addons-grafana-svc.yaml index d3851b126c09..2176176acb7e 100644 --- a/incubator/istio/templates/addons-grafana-svc.yaml +++ b/incubator/istio/templates/addons-grafana-svc.yaml @@ -16,4 +16,4 @@ spec: app: {{ include "name" . 
}} component: {{ $serviceName }}-{{ .Values.addons.grafana.deployment.name }} release: {{ .Release.Name }} -{{end}} \ No newline at end of file +{{ end }} diff --git a/incubator/istio/templates/addons-servicegraph-svc.yaml b/incubator/istio/templates/addons-servicegraph-svc.yaml index 446535550188..c477540ab5b0 100644 --- a/incubator/istio/templates/addons-servicegraph-svc.yaml +++ b/incubator/istio/templates/addons-servicegraph-svc.yaml @@ -15,4 +15,4 @@ spec: app: {{ include "name" . }} component: {{ $serviceName }}-{{ .Values.addons.servicegraph.deployment.name }} release: {{ .Release.Name }} -{{end}} \ No newline at end of file +{{ end }} diff --git a/incubator/kafka/templates/kafka-ss.yaml b/incubator/kafka/templates/kafka-ss.yaml index 721d871823ca..a38645aa453b 100644 --- a/incubator/kafka/templates/kafka-ss.yaml +++ b/incubator/kafka/templates/kafka-ss.yaml @@ -8,7 +8,7 @@ spec: - port: 2181 name: client selector: - component: "{{.Release.Name}}" + component: "{{ .Release.Name }}" # A headless service to create DNS records --- @@ -17,19 +17,19 @@ kind: Service metadata: name: "{{ printf "%s-broker-%s" .Release.Name .Values.Name | trunc 63 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: - "helm.sh/created": {{.Release.Time.Seconds | quote }} + "helm.sh/created": {{ .Release.Time.Seconds | quote }} service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: ports: - port: 9092 clusterIP: None selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" app: kafka --- apiVersion: v1 @@ -37,17 +37,17 @@ kind: Service metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 63 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: - "helm.sh/created": {{.Release.Time.Seconds | quote }} + "helm.sh/created": {{ .Release.Time.Seconds | quote }} spec: ports: - port: 9092 selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" app: kafka --- apiVersion: apps/v1beta1 @@ -56,12 +56,12 @@ metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 63 }}" labels: app: kafka - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: - "helm.sh/created": {{.Release.Time.Seconds | quote }} + "helm.sh/created": {{ .Release.Time.Seconds | quote }} spec: serviceName: "{{ printf "%s-broker-%s" .Release.Name .Values.Name | trunc 63 }}" replicas: {{ default 3 .Values.Replicas }} @@ -69,7 +69,7 @@ spec: metadata: labels: app: kafka - component: 
"{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: pod.alpha.kubernetes.io/initialized: "true" pod.alpha.kubernetes.io/init-containers: '[ @@ -77,8 +77,8 @@ spec: spec: containers: - name: "{{ printf "%s-broker-%s" .Release.Name .Values.Name | trunc 63 }}" - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + image: "{{ .Values.Image }}:{{ .Values.ImageTag }}" + imagePullPolicy: "{{ .Values.ImagePullPolicy }}" livenessProbe: exec: command: @@ -125,4 +125,4 @@ spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: {{.Values.Storage}} + storage: {{ .Values.Storage }} diff --git a/incubator/patroni/Chart.yaml b/incubator/patroni/Chart.yaml index eeada4f79225..caf2fc3c09b0 100644 --- a/incubator/patroni/Chart.yaml +++ b/incubator/patroni/Chart.yaml @@ -1,12 +1,13 @@ name: patroni description: "Highly available elephant herd: HA PostgreSQL cluster." -version: 0.2.0 +version: 0.2.1 +appVersion: 1.0-p5 home: https://github.com/zalando/patroni sources: - https://github.com/zalando/patroni - https://github.com/zalando/spilo maintainers: - - name: Team ACID @ Zalando SE + - name: alexeyklyukin email: team-acid@zalando.de - - name: Team Teapot @ Zalando SE + - name: linki email: team-teapot@zalando.de diff --git a/incubator/patroni/README.md b/incubator/patroni/README.md index 32514b57e613..20fa31e92350 100644 --- a/incubator/patroni/README.md +++ b/incubator/patroni/README.md @@ -57,15 +57,21 @@ The following tables lists the configurable parameters of the patroni chart and | `Spilo.Version` | Container image tag | `1.0-p5` | | `ImagePullPolicy` | Container pull policy | `IfNotPresent` | | `Replicas` | k8s statefulset replicas | `5` | +| `NodeSelector` | nodeSelector map | Empty | | `Component` | k8s selector key | `patroni` | | `Resources.Cpu` | container requested cpu | `100m` | | `Resources.Memory` | container requested memory | `512Mi` | -| `Resources.Storage` | Persistent volume size | `1Gi` | | `Credentials.Superuser` | password for the superuser | `tea` | | `Credentials.Admin` | password for the admin user | `cola` | | `Credentials.Standby` | password for the replication user | `pinacolada` | | `Etcd.Host` | host name of etcd cluster | not used (Etcd.Discovery is used instead) | | `Etcd.Discovery` | domain name of etcd cluster | `-etcd..svc.cluster.local` | +| `persistentVolume.accessModes` | Persistent Volume access modes | `[ReadWriteOnce]` | +| `persistentVolume.annotations` | Annotations for Persistent Volume Claim` | `{}` | +| `persistentVolume.mountPath` | Persistent Volume mount root path | `/home/postgres/pgdata` | +| `persistentVolume.size` | Persistent Volume size | `2Gi` | +| `persistentVolume.storageClass` | Persistent Volume Storage Class | `volume.alpha.kubernetes.io/storage-class: default` | +| `persistentVolume.subPath` | Subdirectory of Persistent Volume to mount | `""` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
diff --git a/incubator/patroni/requirements.lock b/incubator/patroni/requirements.lock index 62491ae92d3c..a8433054b6ff 100644 --- a/incubator/patroni/requirements.lock +++ b/incubator/patroni/requirements.lock @@ -1,9 +1,10 @@ dependencies: - condition: "" enabled: false + import-values: null name: etcd repository: https://kubernetes-charts-incubator.storage.googleapis.com/ tags: null version: 0.2.0 -digest: sha256:252c24b9a22eb235857447e235fbfa89ea9e13ebffedede26c087540315a88c5 -generated: 2017-03-24T16:20:33.196803732Z +digest: sha256:ed1ddf10ed804801e6b01afd2533dcf3ef4b0c6000513110ff78c1430934c2a1 +generated: 2017-06-08T12:51:07.354627568+02:00 diff --git a/incubator/patroni/templates/ep-patroni.yaml b/incubator/patroni/templates/ep-patroni.yaml index a170b096a4ec..ca84ac34dbc6 100644 --- a/incubator/patroni/templates/ep-patroni.yaml +++ b/incubator/patroni/templates/ep-patroni.yaml @@ -6,5 +6,5 @@ metadata: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" subsets: [] diff --git a/incubator/patroni/templates/sec-patroni.yaml b/incubator/patroni/templates/sec-patroni.yaml index 38a0210aebe8..770ed7b4aa9b 100644 --- a/incubator/patroni/templates/sec-patroni.yaml +++ b/incubator/patroni/templates/sec-patroni.yaml @@ -6,7 +6,7 @@ metadata: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" type: Opaque data: password-superuser: {{ .Values.Credentials.Superuser | b64enc }} diff --git a/incubator/patroni/templates/statefulset-patroni.yaml b/incubator/patroni/templates/statefulset-patroni.yaml index 2adbe7be6751..d4d8552fbb9a 100644 --- a/incubator/patroni/templates/statefulset-patroni.yaml +++ b/incubator/patroni/templates/statefulset-patroni.yaml @@ -19,6 +19,12 @@ spec: chart: "{{.Chart.Name}}-{{.Chart.Version}}" component: "{{.Release.Name}}-{{.Values.Component}}" spec: + {{ if .Values.NodeSelector }} + nodeSelector: + {{ range $key, $value := .Values.NodeSelector }} + {{ $key }}: {{ $value | quote }} + {{ end }} + {{ end }} containers: - name: spilo image: "{{ .Values.Spilo.Image }}:{{ .Values.Spilo.Version }}" @@ -64,8 +70,9 @@ spec: cpu: "{{.Values.Resources.Cpu}}" memory: "{{.Values.Resources.Memory}}" volumeMounts: - - name: pgdata - mountPath: /home/postgres/pgdata + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + subPath: "{{ .Values.persistentVolume.subPath }}" - mountPath: /etc/patroni name: patroni-config readOnly: true @@ -73,13 +80,27 @@ spec: - name: patroni-config secret: secretName: {{ template "fullname" . 
}} + volumeClaimTemplates: - - metadata: - name: pgdata - annotations: - volume.alpha.kubernetes.io/storage-class: default - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: "{{.Values.Resources.Storage}}" + - metadata: + name: storage-volume + annotations: + {{- if .Values.persistentVolume.storageClass }} + volume.beta.kubernetes.io/storage-class: "{{ .Values.persistentVolume.storageClass }}" + {{- else }} + volume.alpha.kubernetes.io/storage-class: default + {{- end }} + {{- if .Values.persistentVolume.annotations }} +{{ toYaml .Values.persistentVolume.annotations | indent 8 }} + {{- end }} + labels: + heritage: {{.Release.Service | quote }} + release: {{.Release.Name | quote }} + chart: "{{.Chart.Name}}-{{.Chart.Version}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" + spec: + accessModes: +{{ toYaml .Values.persistentVolume.accessModes | indent 8 }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" diff --git a/incubator/patroni/templates/svc-patroni.yaml b/incubator/patroni/templates/svc-patroni.yaml index 8bbd672f07a0..9dc55793603a 100644 --- a/incubator/patroni/templates/svc-patroni.yaml +++ b/incubator/patroni/templates/svc-patroni.yaml @@ -6,7 +6,7 @@ metadata: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: type: ClusterIP ports: diff --git a/incubator/patroni/values.yaml b/incubator/patroni/values.yaml index 2eafa3ae8255..63cca2d8e60f 100644 --- a/incubator/patroni/values.yaml +++ b/incubator/patroni/values.yaml @@ -16,7 +16,10 @@ Replicas: 5 Resources: Cpu: 100m Memory: 512Mi - Storage: 1Gi + +# Node selector for Patroni pods +#NodeSelector: +# cloud.google.com/gke-nodepool: highmem-pool # Credentials used by Patroni # * more information: https://github.com/zalando/patroni/blob/master/docs/SETTINGS.rst#postgresql @@ -29,3 +32,13 @@ Credentials: Etcd: Host: # fill-in value for etcd host (etcd.default.svc.cluster.local), leave blank to use the discovery parameter Discovery: # leave blank to use vendored etcd chart + +persistentVolume: + size: 1G + storageClass: "" + subPath: "" + mountPath: "/home/postgres/data" + annotations: {} + + accessModes: + - ReadWriteOnce diff --git a/incubator/tensorflow-inception/templates/_helpers.tpl b/incubator/tensorflow-inception/templates/_helpers.tpl index 92c9f2080705..2a9bbaad2339 100644 --- a/incubator/tensorflow-inception/templates/_helpers.tpl +++ b/incubator/tensorflow-inception/templates/_helpers.tpl @@ -12,5 +12,5 @@ We truncate at 24 chars because some Kubernetes name fields are limited to this */}} {{- define "fullname" -}} {{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} -{{- end -}} \ No newline at end of file +{{- printf "%s-%s" .Release.Name $name | trunc 24 | trimSuffix "-" -}} +{{- end -}} diff --git a/incubator/tensorflow-inception/templates/tensorflow-inception.yaml b/incubator/tensorflow-inception/templates/tensorflow-inception.yaml index 98b17165ad4f..6955a160e323 100644 --- a/incubator/tensorflow-inception/templates/tensorflow-inception.yaml +++ b/incubator/tensorflow-inception/templates/tensorflow-inception.yaml @@ -3,19 +3,19 @@ kind: Service metadata: name: {{ template "fullname" . 
}} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: - "helm.sh/created": {{.Release.Time.Seconds | quote }} + "helm.sh/created": {{ .Release.Time.Seconds | quote }} spec: ports: - - port: {{.Values.ServicePort}} - targetPort: {{.Values.ContainerPort}} + - port: {{ .Values.ServicePort }} + targetPort: {{ .Values.ContainerPort }} selector: - component: "{{.Release.Name}}-{{.Values.Component}}" - type: {{.Values.ServiceType}} + component: "{{ .Release.Name }}-{{ .Values.Component }}" + type: {{ .Values.ServiceType }} --- apiVersion: extensions/v1beta1 @@ -23,41 +23,41 @@ kind: Deployment metadata: name: {{ template "fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: - "helm.sh/created": {{.Release.Time.Seconds | quote }} + "helm.sh/created": {{ .Release.Time.Seconds | quote }} spec: replicas: {{ default 1 .Values.Replicas }} strategy: type: RollingUpdate selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: containers: - name: {{ template "fullname" . }} - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + image: "{{ .Values.Image }}:{{ .Values.ImageTag }}" + imagePullPolicy: "{{ .Values.ImagePullPolicy }}" command: - "/bin/sh" - "-c" args: - - "/serving/bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server --port={{.Values.ContainerPort}} --model_name=inception --model_base_path=/serving/inception-export" + - "/serving/bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server --port={{ .Values.ContainerPort }} --model_name=inception --model_base_path=/serving/inception-export" ports: - - containerPort: {{.Values.ContainerPort}} + - containerPort: {{ .Values.ContainerPort }} readinessProbe: tcpSocket: - port: {{.Values.ContainerPort}} + port: {{ .Values.ContainerPort }} initialDelaySeconds: 15 timeoutSeconds: 1 resources: diff --git a/incubator/zookeeper/templates/NOTES.txt b/incubator/zookeeper/templates/NOTES.txt index e68b38c5e03a..0cfe82ed2725 100644 --- a/incubator/zookeeper/templates/NOTES.txt +++ b/incubator/zookeeper/templates/NOTES.txt @@ -1,24 +1,24 @@ Thank you for installing ZooKeeper on your Kubernetes cluster. More information about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/ -1. ZooKeeper is not accessible outside of the Kubernetes cluster. 
Its purpose is -to provide coordination for distributed systems running inside the cluster. As -ZooKeeper uses a TCP based protocol with an internal wire format, you will -probably want to use an existing client library for communication with the -ensemble. -2. The officially maintained clients are written in C and Java, and these can be -obtained with ZooKeeper release from -http://www-us.apache.org/dist/zookeeper/zookeeper-3.4.9/. -A list of language specific bindings and higher level libraries is available +1. ZooKeeper is not accessible outside of the Kubernetes cluster. Its purpose is +to provide coordination for distributed systems running inside the cluster. As +ZooKeeper uses a TCP based protocol with an internal wire format, you will +probably want to use an existing client library for communication with the +ensemble. +2. The officially maintained clients are written in C and Java, and these can be +obtained with ZooKeeper release from +http://www-us.apache.org/dist/zookeeper/zookeeper-3.4.9/. +A list of language specific bindings and higher level libraries is available here https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZKClientBindings. -3. Most ZooKeeper clients require a connection string when instantiating an -instance of the client library, or when first connecting the client to the +3. Most ZooKeeper clients require a connection string when instantiating an +instance of the client library, or when first connecting the client to the ensemble. The connection string takes the form -:,:,... . If your ensemble has greater than 1 -server, you should provide multiple servers in the connection string to allow -for the client to continue to function in the presence of individual server -failures. It is important that you do not use IP addresses and instead use the -DNS entries for the domain established by the StatefulSet's HeadlessService. -Your connection string should look like zk-{{.Release.Name | trunc 24 }}-0.{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}:{{.Values.ClientPort}},zk-{{.Release.Name | trunc 24 }}-1.{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}:{{.Values.ClientPort}},... . -You can also use the client Service zk-csvc:{{.Values.ClientPort}} to -connect to an available ZooKeeper server. \ No newline at end of file +:,:,... . If your ensemble has greater than 1 +server, you should provide multiple servers in the connection string to allow +for the client to continue to function in the presence of individual server +failures. It is important that you do not use IP addresses and instead use the +DNS entries for the domain established by the StatefulSet's HeadlessService. +Your connection string should look like zk-{{ .Release.Name | trunc 24 }}-0.{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}:{{ .Values.ClientPort }},zk-{{ .Release.Name | trunc 24 }}-1.{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}:{{ .Values.ClientPort }},... . +You can also use the client Service zk-csvc:{{ .Values.ClientPort }} to +connect to an available ZooKeeper server. 
diff --git a/incubator/zookeeper/templates/csvc.yaml b/incubator/zookeeper/templates/csvc.yaml index d51f80cac31c..692977ba7c72 100644 --- a/incubator/zookeeper/templates/csvc.yaml +++ b/incubator/zookeeper/templates/csvc.yaml @@ -3,13 +3,13 @@ kind: Service metadata: name: "{{ printf "zk-csvc-%s" .Release.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" spec: ports: - - port: {{.Values.ClientPort}} + - port: {{ .Values.ClientPort }} name: client selector: - component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" \ No newline at end of file + component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" diff --git a/incubator/zookeeper/templates/pdb.yaml b/incubator/zookeeper/templates/pdb.yaml index 4a2ab1ef4c65..bd1af2da4e59 100644 --- a/incubator/zookeeper/templates/pdb.yaml +++ b/incubator/zookeeper/templates/pdb.yaml @@ -3,18 +3,18 @@ kind: PodDisruptionBudget metadata: name: "{{ printf "zk-pdb-%s" .Release.Name | trunc 24 }}" annotations: - helm.sh/created: {{.Release.Time.Seconds | quote }} + helm.sh/created: {{ .Release.Time.Seconds | quote }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" spec: selector: matchLabels: component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" - {{- if .Values.MinAvailable}} - minAvailable: {{.Values.MinAvailable}} - {{- else}} - minAvailable: {{sub .Values.Servers 1}} - {{- end}} \ No newline at end of file + {{- if .Values.MinAvailable }} + minAvailable: {{ .Values.MinAvailable }} + {{- else }} + minAvailable: {{ sub .Values.Servers 1 }} + {{- end }} diff --git a/incubator/zookeeper/templates/ss.yaml b/incubator/zookeeper/templates/ss.yaml index d4bc8929fe2e..b2366028a8f5 100644 --- a/incubator/zookeeper/templates/ss.yaml +++ b/incubator/zookeeper/templates/ss.yaml @@ -3,21 +3,21 @@ kind: StatefulSet metadata: name: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" annotations: - helm.sh/created: {{.Release.Time.Seconds | quote }} + helm.sh/created: {{ .Release.Time.Seconds | quote }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" spec: serviceName: "{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}" - replicas: {{.Values.Servers}} + replicas: {{ .Values.Servers }} template: metadata: labels: component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" spec: - {{- if eq .Values.AntiAffinity "hard"}} + {{- if eq .Values.AntiAffinity "hard" }} affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -28,7 +28,7 @@ spec: values: - "{{ printf "zk-%s" .Release.Name | trunc 24 }}" topologyKey: "kubernetes.io/hostname" - {{- else if eq .Values.AntiAffinity "soft"}} + {{- else if eq .Values.AntiAffinity "soft" }} affinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 1 @@ -37,50 +37,50 @@ 
spec: matchExpressions: - key: "component" operator: In - values: + values: - "{{ printf "zk-%s" .Release.Name | trunc 24 }}" topologyKey: "kubernetes.io/hostname" - {{- end}} + {{- end }} containers: - name: k8szk - imagePullPolicy: {{.Values.ImagePullPolicy}} + imagePullPolicy: {{ .Values.ImagePullPolicy }} image: gcr.io/google_samples/k8szk:v2 resources: requests: - memory: {{.Values.Memory}} - cpu: {{.Values.Cpu}} + memory: {{ .Values.Memory }} + cpu: {{ .Values.Cpu }} ports: - - containerPort: {{.Values.ClientPort}} + - containerPort: {{ .Values.ClientPort }} name: client - - containerPort: {{.Values.ServerPort}} + - containerPort: {{ .Values.ServerPort }} name: server - - containerPort: {{.Values.LeaderElectionPort}} + - containerPort: {{ .Values.LeaderElectionPort }} name: leader-election env: - name : ZK_REPLICAS - value: "{{.Values.Servers}}" + value: "{{ .Values.Servers }}" - name : ZK_HEAP_SIZE - value: "{{.Values.Heap}}" + value: "{{ .Values.Heap }}" - name : ZK_TICK_TIME - value: "{{.Values.TickTimeMs}}" + value: "{{ .Values.TickTimeMs }}" - name : ZK_INIT_LIMIT - value: "{{.Values.InitTicks}}" + value: "{{ .Values.InitTicks }}" - name : ZK_SYNC_LIMIT - value: "{{.Values.SyncTicks}}" + value: "{{ .Values.SyncTicks }}" - name : ZK_MAX_CLIENT_CNXNS - value: "{{.Values.ClientCnxns}}" + value: "{{ .Values.ClientCnxns }}" - name: ZK_SNAP_RETAIN_COUNT - value: "{{.Values.SnapRetain}}" + value: "{{ .Values.SnapRetain }}" - name: ZK_PURGE_INTERVAL - value: "{{.Values.PurgeHours}}" + value: "{{ .Values.PurgeHours }}" - name: ZK_LOG_LEVEL - value: {{.Values.LogLevel}} + value: {{ .Values.LogLevel }} - name: ZK_CLIENT_PORT - value: "{{.Values.ClientPort}}" + value: "{{ .Values.ClientPort }}" - name: ZK_SERVER_PORT - value: "{{.Values.ServerPort}}" + value: "{{ .Values.ServerPort }}" - name: ZK_ELECTION_PORT - value: "{{.Values.LeaderElectionPort}}" + value: "{{ .Values.LeaderElectionPort }}" command: - sh - -c @@ -89,14 +89,14 @@ spec: exec: command: - "zkOk.sh" - initialDelaySeconds: {{.Values.ProbeInitialDelaySeconds}} - timeoutSeconds: {{.Values.ProbeTimeoutSeconds}} + initialDelaySeconds: {{ .Values.ProbeInitialDelaySeconds }} + timeoutSeconds: {{ .Values.ProbeTimeoutSeconds }} livenessProbe: exec: command: - "zkOk.sh" - initialDelaySeconds: {{.Values.ProbeInitialDelaySeconds}} - timeoutSeconds: {{.Values.ProbeTimeoutSeconds}} + initialDelaySeconds: {{ .Values.ProbeInitialDelaySeconds }} + timeoutSeconds: {{ .Values.ProbeTimeoutSeconds }} volumeMounts: - name: datadir mountPath: /var/lib/zookeeper @@ -110,7 +110,7 @@ spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: {{.Values.Storage}} - {{- if .Values.StorageClass}} - storageClassName: {{.Values.StorageClass | quote}} - {{- end}} + storage: {{ .Values.Storage }} + {{- if .Values.StorageClass }} + storageClassName: {{ .Values.StorageClass | quote }} + {{- end }} diff --git a/incubator/zookeeper/templates/svc.yaml b/incubator/zookeeper/templates/svc.yaml index 968213f9d03b..6d818739030d 100644 --- a/incubator/zookeeper/templates/svc.yaml +++ b/incubator/zookeeper/templates/svc.yaml @@ -3,16 +3,16 @@ kind: Service metadata: name: "{{ printf "zk-hsvc-%s" .Release.Name | trunc 24 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" spec: ports: - 
- port: {{.Values.ServerPort}} + - port: {{ .Values.ServerPort }} name: server - - port: {{.Values.LeaderElectionPort}} + - port: {{ .Values.LeaderElectionPort }} name: leader-election clusterIP: None selector: - component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" \ No newline at end of file + component: "{{ printf "zk-%s" .Release.Name | trunc 24 }}" diff --git a/incubator/artifactory/.helmignore b/stable/artifactory/.helmignore similarity index 100% rename from incubator/artifactory/.helmignore rename to stable/artifactory/.helmignore diff --git a/incubator/artifactory/Chart.yaml b/stable/artifactory/Chart.yaml old mode 100755 new mode 100644 similarity index 79% rename from incubator/artifactory/Chart.yaml rename to stable/artifactory/Chart.yaml index 9b04b708a751..ffe43f474fba --- a/incubator/artifactory/Chart.yaml +++ b/stable/artifactory/Chart.yaml @@ -1,6 +1,8 @@ +apiVersion: v1 name: artifactory home: https://www.jfrog.com/artifactory/ -version: 5.2.0 +version: 5.4.1 +appVersion: 5.4.1 description: Universal Repository Manager supporting all major packaging formats, build tools and CI servers. keywords: - artifactory @@ -9,6 +11,8 @@ sources: - https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view - https://github.com/JFrogDev maintainers: - - name: Jainish shah + - name: jainishshah17 email: jainishs@jfrog.com + - name: eldada + email: eldada@jfrog.com icon: https://raw.githubusercontent.com/JFrogDev/artifactory-dcos/master/images/jfrog_med.png diff --git a/stable/artifactory/README.md b/stable/artifactory/README.md new file mode 100644 index 000000000000..d4f1baef5bb9 --- /dev/null +++ b/stable/artifactory/README.md @@ -0,0 +1,111 @@ +# JFrog Artifactory Helm Chart + +## Prerequisites Details + +* Artifactory Pro trial license [get one from here](https://www.jfrog.com/artifactory/free-trial/) + +## Chart Details +This chart will do the following: + +* Deploy Artifactory-Oss +* Deploy Artifactory-Pro + +## Installing the Chart + +To install the chart with the release name `artifactory`: + +```bash +$ helm install --name artifactory stable/artifactory +``` + +### Deploying Artifactory OSS +By default it will run Artifactory-Pro to run Artifactory-Oss use following command: +```bash +$ helm install --name artifactory --set artifactory.image.repository=docker.bintray.io/jfrog/artifactory-oss stable/artifactory +``` + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available. +Follow the instructions outputted by the install command to get the Artifactory IP to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +$ helm upgrade artifactory --namespace artifactory stable/artifactory +``` + +This will apply any configuration changes on your existing deployment. + +### Customizing Database password +You can override the specified database password (set in [values.yaml](values.yaml)), by passing it as a parameter in the install command line +```bash +$ helm install --name artifactory --namespace artifactory --set database.env.pass=12_hX34qwerQ2 stable/artifactory +``` + +You can customise other parameters in the same way, by passing them on `helm install` command line. + +### Deleting Artifactory +```bash +$ helm delete --purge artifactory +``` + +This will completely delete your Artifactory Pro deployment. +**IMPORTANT:** This will also delete your data volumes. You will loose all data! 
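As with the database password above, any of the values listed in the Configuration table below can be overridden on the `helm install` command line. A minimal sketch (the release name, namespace, and chosen overrides here are examples only):

```bash
# Example only: pin the Artifactory image tag and expose Nginx on a NodePort (e.g. for minikube)
$ helm install --name artifactory --namespace artifactory \
    --set artifactory.image.version=5.4.1 \
    --set nginx.service.type=NodePort \
    stable/artifactory
```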
+ +## Configuration + +The following tables lists the configurable parameters of the artifactory chart and their default values. + +| Parameter | Description | Default | +|---------------------------|-----------------------------------|----------------------------------------------------------| +| `database.name` | Database name | `postgresql` | +| `database.replicaCount` | Database replica count | `1` | +| `database.env.type` | Database type | `postgresql` | +| `database.env.name` | Database name | `artifactory` | +| `database.env.user` | Database username | `artifactory` | +| `database.env.pass` | Database password | `artXifactory1973` | +| `database.image.repository` | Database container image | `docker.bintray.io/postgres` | +| `database.image.version` | Database container image tag | `9.5.2` | +| `database.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `database.service.type` | Database service type | `ClusterIP` | +| `database.externalPort` | Database service external port | `5432` | +| `database.internalPort` | Database service internal port | `5432` | +| `database.persistence.mountPath` | Database persistence volume mount path | `"/var/lib/postgresql/data"` | +| `database.persistence.enabled` | Database persistence volume enabled | `true` | +| `database.persistence.accessMode` | Database persistence volume access mode | `ReadWriteOnce` | +| `database.persistence.size` | Database persistence volume size | `10Gi` | +| `artifactory.name` | Artifactory name | `artifactory` | +| `artifactory.replicaCount` | Replica count for Artifactory deployment| `1` | +| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` | +| `artifactory.image.version` | Container image tag | `5.4.1` | +| `artifactory.service.type`| Artifactory service type | `ClusterIP` | +| `artifactory.externalPort` | Artifactory service external port | `8081` | +| `artifactory.internalPort` | Artifactory service internal port | `8081` | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence volume size | `20Gi` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.image.version` | Container image tag | `5.4.1` | +| `nginx.service.type`| Nginx service type | `LoadBalancer` | +| `nginx.externalPortHttp` | Nginx service external port | `80` | +| `nginx.internalPortHttp` | Nginx service internal port | `80` | +| `nginx.externalPortHttps` | Nginx service external port | `443` | +| `nginx.internalPortHttps` | Nginx service internal port | `443` | +| `nginx.env.artUrl` | Nginx Environment variable Artifactory URL | `"http://artifactory:8081/artifactory"` | +| `nginx.env.ssl` | Nginx Environment enable ssl | `true` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled | `true` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | 
`ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + + +## Useful links +https://www.jfrog.com +https://www.jfrog.com/confluence/ diff --git a/stable/artifactory/templates/NOTES.txt b/stable/artifactory/templates/NOTES.txt new file mode 100644 index 000000000000..d744ea9a4449 --- /dev/null +++ b/stable/artifactory/templates/NOTES.txt @@ -0,0 +1,27 @@ +Congratulations. You have just deployed JFrog Artifactory Pro! + +1. Get the Artifactory URL by running these commands: + + {{- if contains "NodePort" .Values.nginx.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "nginx.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + + {{- else if contains "LoadBalancer" .Values.nginx.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of the service by running 'kubectl get svc -w {{ template "nginx.name" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "nginx.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ + + {{- else if contains "ClusterIP" .Values.nginx.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:{{ .Values.nginx.externalPortHttp }} + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.nginx.externalPortHttp }}:{{ .Values.nginx.internalPortHttp }} + + {{- end }} + +2. Open Artifactory in your browser + Default credential for Artifactory: + user: admin + password: password diff --git a/stable/artifactory/templates/_helpers.tpl b/stable/artifactory/templates/_helpers.tpl new file mode 100644 index 000000000000..fc351b14fe5b --- /dev/null +++ b/stable/artifactory/templates/_helpers.tpl @@ -0,0 +1,65 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name artifactory service. +*/}} +{{- define "artifactory.name" -}} +{{- default .Values.artifactory.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name database service. +*/}} +{{- define "database.name" -}} +{{- default .Values.database.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name nginx service. +*/}} +{{- define "nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified application name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "artifactory.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.artifactory.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified database name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "database.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.database.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified nginx name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "nginx.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nginx.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} \ No newline at end of file diff --git a/stable/artifactory/templates/artifactory-deployment.yaml b/stable/artifactory/templates/artifactory-deployment.yaml new file mode 100644 index 000000000000..6a3c71622c37 --- /dev/null +++ b/stable/artifactory/templates/artifactory-deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.artifactory.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.artReplicaCount }} + template: + metadata: + labels: + app: {{ template "name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "remove-lost-found", + "image": {{ .Values.initContainerImage | quote }}, + "command": ["rm", "-rf", "{{ .Values.artifactory.persistence.mountPath }}/lost+found"], + "volumeMounts": [{ + "name": "artifactory-volume", + "mountPath": {{ .Values.artifactory.persistence.mountPath | quote }} + }], + "imagePullPolicy": {{ .Values.artifactory.image.pullPolicy | quote }} + }]' + spec: + containers: + - name: {{ .Values.artifactory.name }} + image: "{{ .Values.artifactory.image.repository }}:{{ .Values.artifactory.image.version }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + env: + - name: DB_TYPE + value: {{ .Values.database.env.type }} + - name: DB_USER + value: {{ .Values.database.env.user }} + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "fullname" . }} + key: artifactory-database-password + - name: DB_HOST + value: {{ template "database.name" . }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: artifactory-volume + volumes: + - name: artifactory-volume + {{- if .Values.artifactory.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ template "artifactory.fullname" . 
}} + {{- else }} + emptyDir: {} + {{- end -}} \ No newline at end of file diff --git a/stable/artifactory/templates/artifactory-pvc.yaml b/stable/artifactory/templates/artifactory-pvc.yaml new file mode 100755 index 000000000000..0b475eeadd04 --- /dev/null +++ b/stable/artifactory/templates/artifactory-pvc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.artifactory.persistence.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + {{- if .Values.artifactory.persistence.storageClass }} + volume.beta.kubernetes.io/storage-class: {{ .Values.artifactory.persistence.storageClass | quote }} + {{- else }} + volume.alpha.kubernetes.io/storage-class: default + {{- end }} +spec: + accessModes: + - {{ .Values.artifactory.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.artifactory.persistence.size | quote }} +{{- end }} \ No newline at end of file diff --git a/stable/artifactory/templates/artifactory-service.yaml b/stable/artifactory/templates/artifactory-service.yaml new file mode 100644 index 000000000000..f925b8819d81 --- /dev/null +++ b/stable/artifactory/templates/artifactory-service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory.name" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.artifactory.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.artifactory.service.type }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: {{ .Release.Name }} + selector: + app: {{ template "name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} \ No newline at end of file diff --git a/stable/artifactory/templates/nginx-deployment.yaml b/stable/artifactory/templates/nginx-deployment.yaml new file mode 100644 index 000000000000..df05aa488c4a --- /dev/null +++ b/stable/artifactory/templates/nginx-deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "nginx.fullname" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nginx.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.nginx.replicaCount }} + template: + metadata: + labels: + app: {{ template "name" . 
}} + component: "{{ .Values.nginx.name }}" + release: {{ .Release.Name }} + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "remove-lost-found", + "image": {{ .Values.initContainerImage | quote }}, + "command": ["rm", "-rf", "{{ .Values.nginx.persistence.mountPath }}/lost+found"], + "volumeMounts": [{ + "name": "nginx-volume", + "mountPath": {{ .Values.nginx.persistence.mountPath | quote }} + }], + "imagePullPolicy": {{ .Values.nginx.image.pullPolicy | quote }} + }]' + spec: + containers: + - name: {{ .Values.nginx.name }} + image: "{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.version }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + env: + - name: ART_BASE_URL + value: {{ .Values.nginx.env.artUrl }} + - name: SSL + value: "{{ .Values.nginx.env.ssl }}" + ports: + - containerPort: {{ .Values.nginx.internalPortHttp }} + - containerPort: {{ .Values.nginx.internalPortHttps }} + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + volumes: + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ template "nginx.fullname" . }} + {{- else }} + emptyDir: {} + {{- end -}} \ No newline at end of file diff --git a/stable/artifactory/templates/nginx-pvc.yaml b/stable/artifactory/templates/nginx-pvc.yaml new file mode 100755 index 000000000000..7c499120ffc1 --- /dev/null +++ b/stable/artifactory/templates/nginx-pvc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.nginx.persistence.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "nginx.fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + {{- if .Values.nginx.persistence.storageClass }} + volume.beta.kubernetes.io/storage-class: {{ .Values.nginx.persistence.storageClass | quote }} + {{- else }} + volume.alpha.kubernetes.io/storage-class: default + {{- end }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- end }} \ No newline at end of file diff --git a/stable/artifactory/templates/nginx-service.yaml b/stable/artifactory/templates/nginx-service.yaml new file mode 100644 index 000000000000..a550c15fba8f --- /dev/null +++ b/stable/artifactory/templates/nginx-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "nginx.name" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.nginx.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.nginx.service.type }} + ports: + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: {{ .Values.nginx.name }}http + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: {{ .Release.Name }}https + selector: + app: {{ template "name" . 
}} + component: "{{ .Values.nginx.name }}" + release: {{ .Release.Name }} \ No newline at end of file diff --git a/stable/artifactory/templates/postgresql-deployment.yaml b/stable/artifactory/templates/postgresql-deployment.yaml new file mode 100644 index 000000000000..029f3b65d5bf --- /dev/null +++ b/stable/artifactory/templates/postgresql-deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "database.fullname" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.database.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "name" . }} + component: "{{ .Values.database.name }}" + release: {{ .Release.Name }} + annotations: + pod.beta.kubernetes.io/init-containers: '[{ + "name": "remove-lost-found", + "image": {{ .Values.initContainerImage | quote }}, + "command": ["rm", "-rf", "{{ .Values.database.persistence.mountPath }}/lost+found"], + "volumeMounts": [{ + "name": "postgresql-volume", + "mountPath": {{ .Values.database.persistence.mountPath | quote }} + }], + "imagePullPolicy": {{ .Values.database.image.pullPolicy | quote }} + }]' + spec: + containers: + - name: {{ .Values.database.name }} + image: "{{ .Values.database.image.repository }}:{{ .Values.database.image.version }}" + imagePullPolicy: {{ .Values.database.image.pullPolicy }} + env: + - name: POSTGRES_DB + value: {{ .Values.database.env.name }} + - name: POSTGRES_USER + value: {{ .Values.database.env.user }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "fullname" . }} + key: artifactory-database-password + ports: + - containerPort: {{ .Values.database.internalPort }} + volumeMounts: + - mountPath: {{ .Values.database.persistence.mountPath | quote }} + name: postgresql-volume + volumes: + - name: postgresql-volume + {{- if .Values.database.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ template "database.fullname" . }} + {{- else }} + emptyDir: {} + {{- end -}} \ No newline at end of file diff --git a/stable/artifactory/templates/postgresql-pvc.yaml b/stable/artifactory/templates/postgresql-pvc.yaml new file mode 100755 index 000000000000..2fc90cc7c337 --- /dev/null +++ b/stable/artifactory/templates/postgresql-pvc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.database.persistence.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "database.fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + {{- if .Values.database.persistence.storageClass }} + volume.beta.kubernetes.io/storage-class: {{ .Values.database.persistence.storageClass | quote }} + {{- else }} + volume.alpha.kubernetes.io/storage-class: default + {{- end }} +spec: + accessModes: + - {{ .Values.database.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.database.persistence.size | quote }} +{{- end }} diff --git a/stable/artifactory/templates/postgresql-secret.yaml b/stable/artifactory/templates/postgresql-secret.yaml new file mode 100644 index 000000000000..3bd612f0b839 --- /dev/null +++ b/stable/artifactory/templates/postgresql-secret.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app: {{ template "name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fullname" . }} +type: Opaque +data: + {{- if .Values.database.env.pass }} + artifactory-database-password: {{ .Values.database.env.pass | b64enc | quote }} + {{- else }} + artifactory-database-password: {{ randAlphaNum 10 | b64enc | quote }} + {{- end }} diff --git a/stable/artifactory/templates/postgresql-service.yaml b/stable/artifactory/templates/postgresql-service.yaml new file mode 100644 index 000000000000..b29695521d10 --- /dev/null +++ b/stable/artifactory/templates/postgresql-service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "database.name" . }} + labels: + app: {{ template "name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.database.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.database.service.type }} + ports: + - port: {{ .Values.database.externalPort }} + targetPort: {{ .Values.database.internalPort }} + protocol: TCP + name: {{ .Release.Name }} + selector: + app: {{ template "name" . }} + component: "{{ .Values.database.name }}" + release: {{ .Release.Name }} \ No newline at end of file diff --git a/stable/artifactory/values.yaml b/stable/artifactory/values.yaml new file mode 100644 index 000000000000..00ff26bf9772 --- /dev/null +++ b/stable/artifactory/values.yaml @@ -0,0 +1,74 @@ +# Default values for artifactory. +# This is a YAML-formatted file. + +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# Common +initContainerImage: "busybox:1.26.2" + +# Database +database: + name: postgresql + replicaCount: 1 + env: + type: postgresql + name: artifactory + user: artifactory +# pass: artXifactory1973 + image: + repository: docker.bintray.io/postgres + version: 9.5.2 + pullPolicy: IfNotPresent + service: + type: ClusterIP + externalPort: 5432 + internalPort: 5432 + persistence: + mountPath: "/var/lib/postgresql/data" + enabled: true + accessMode: ReadWriteOnce + size: 10Gi + +# Artifactory +artifactory: + name: artifactory + replicaCount: 1 + image: + #repository: "docker.bintray.io/jfrog/artifactory-oss" + repository: "docker.bintray.io/jfrog/artifactory-pro" + version: 5.4.2 + pullPolicy: IfNotPresent + service: + type: ClusterIP + externalPort: 8081 + internalPort: 8081 + persistence: + mountPath: "/var/opt/jfrog/artifactory" + enabled: true + accessMode: ReadWriteOnce + size: 20Gi + +# Nginx +nginx: + name: nginx + replicaCount: 1 + image: + repository: "docker.bintray.io/jfrog/nginx-artifactory-pro" + version: 5.4.2 + pullPolicy: IfNotPresent + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + externalPortHttp: 80 + internalPortHttp: 80 + externalPortHttps: 443 + internalPortHttps: 443 + env: + artUrl: "http://artifactory:8081/artifactory" + ssl: true + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: true + accessMode: ReadWriteOnce + size: 5Gi \ No newline at end of file diff --git a/stable/chaoskube/templates/deployment.yaml b/stable/chaoskube/templates/deployment.yaml index 1c022c341392..fe171e21f06c 100644 --- a/stable/chaoskube/templates/deployment.yaml +++ b/stable/chaoskube/templates/deployment.yaml @@ -26,7 +26,7 @@ spec: - --labels={{ .Values.labels }} - --annotations={{ .Values.annotations }} - --namespaces={{ .Values.namespaces }} - {{- if not .Values.dryRun}} + 
{{- if not .Values.dryRun }} - --no-dry-run {{- end }} resources: diff --git a/stable/chronograf/README.md b/stable/chronograf/README.md index 4aace4a1ba32..47751e259150 100644 --- a/stable/chronograf/README.md +++ b/stable/chronograf/README.md @@ -68,4 +68,4 @@ $ helm install --name my-release -f values.yaml stable/chronograf The [Chronograf](https://quay.io/influxdb/chronograf) image stores data in the `/var/lib/chronograf` directory in the container. -The chart optionally mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. +The chart optionally mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. diff --git a/stable/chronograf/templates/deployment.yaml b/stable/chronograf/templates/deployment.yaml index 0d879b228455..41fe83f8fe71 100644 --- a/stable/chronograf/templates/deployment.yaml +++ b/stable/chronograf/templates/deployment.yaml @@ -25,7 +25,7 @@ spec: secretKeyRef: name: {{ template "fullname" . }} key: token_secret -{{- if .Values.oauth.github.enabled}} +{{- if .Values.oauth.github.enabled }} - name: GH_CLIENT_ID valueFrom: secretKeyRef: @@ -42,7 +42,7 @@ spec: name: {{ template "fullname" . }} key: gh_orgs {{- end }} -{{- if .Values.oauth.heroku.enabled}} +{{- if .Values.oauth.heroku.enabled }} - name: HEROKU_CLIENT_ID valueFrom: secretKeyRef: @@ -59,7 +59,7 @@ spec: name: {{ template "fullname" . }} key: he_orgs {{- end }} -{{- if .Values.oauth.google.enabled}} +{{- if .Values.oauth.google.enabled }} - name: GOOGLE_CLIENT_ID valueFrom: secretKeyRef: diff --git a/stable/cockroachdb/templates/cockroachdb-petset.yaml b/stable/cockroachdb/templates/cockroachdb-statefulset.yaml similarity index 76% rename from stable/cockroachdb/templates/cockroachdb-petset.yaml rename to stable/cockroachdb/templates/cockroachdb-statefulset.yaml index fcc7c72b6d51..0132381cc36a 100644 --- a/stable/cockroachdb/templates/cockroachdb-petset.yaml +++ b/stable/cockroachdb/templates/cockroachdb-statefulset.yaml @@ -5,23 +5,23 @@ metadata: # automatically load balance connections to the different database pods. name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}-public" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: ports: # The main port, served by gRPC, serves Postgres-flavor SQL, internode # traffic and the cli. - - port: {{.Values.GrpcPort}} - targetPort: {{.Values.GrpcPort}} + - port: {{ .Values.GrpcPort }} + targetPort: {{ .Values.GrpcPort }} name: grpc # The secondary port serves the UI as well as health and debug endpoints. - - port: {{.Values.HttpPort}} - targetPort: {{.Values.HttpPort}} + - port: {{ .Values.HttpPort }} + targetPort: {{ .Values.HttpPort }} name: http selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" --- apiVersion: v1 kind: Service @@ -32,10 +32,10 @@ metadata: # in most circumstances. 
name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: # This is needed to make the peer-finder work properly and to help avoid # edge cases where instance 0 comes up after losing its data and needs to @@ -50,30 +50,30 @@ metadata: prometheus.io/port: "8080" spec: ports: - - port: {{.Values.GrpcPort}} - targetPort: {{.Values.GrpcPort}} + - port: {{ .Values.GrpcPort }} + targetPort: {{ .Values.GrpcPort }} name: grpc - - port: {{.Values.HttpPort}} - targetPort: {{.Values.HttpPort}} + - port: {{ .Values.HttpPort }} + targetPort: {{ .Values.HttpPort }} name: http clusterIP: None selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" --- apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}-budget" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Component}}" - minAvailable: {{.Values.MinAvailable}} + component: "{{ .Release.Name }}-{{ .Values.Component }}" + minAvailable: {{ .Values.MinAvailable }} --- apiVersion: apps/v1beta1 kind: StatefulSet @@ -81,14 +81,14 @@ metadata: name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}" spec: serviceName: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}" - replicas: {{default 3 .Values.Replicas}} + replicas: {{ default 3 .Values.Replicas }} template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: scheduler.alpha.kubernetes.io/affinity: > { @@ -99,7 +99,7 @@ spec: "matchExpressions": [{ "key": "component", "operator": "In", - "values": ["{{.Release.Name}}-{{.Values.Component}}"] + "values": ["{{ .Release.Name }}-{{ .Values.Component }}"] }] }, "topologyKey": "kubernetes.io/hostname" @@ -121,12 +121,12 @@ spec: pod.alpha.kubernetes.io/init-containers: '[ { "name": "bootstrap", - "image": "{{.Values.BootstrapImage}}:{{.Values.BootstrapImageTag}}", - "imagePullPolicy": "{{.Values.ImagePullPolicy}}", + "image": "{{ .Values.BootstrapImage }}:{{ .Values.BootstrapImageTag }}", + "imagePullPolicy": "{{ .Values.ImagePullPolicy }}", "args": [ "-on-start=/on-start.sh", "-service={{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}", - "-domain={{.Values.ClusterDomain}}" + "-domain={{ .Values.ClusterDomain }}" ], "env": [ { @@ -150,12 +150,12 @@ spec: spec: containers: - name: "{{ printf "%s-%s" .Release.Name .Values.Name | trunc 56 }}" - image: 
"{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + image: "{{ .Values.Image }}:{{ .Values.ImageTag }}" + imagePullPolicy: "{{ .Values.ImagePullPolicy }}" ports: - - containerPort: {{.Values.GrpcPort}} + - containerPort: {{ .Values.GrpcPort }} name: grpc - - containerPort: {{.Values.HttpPort}} + - containerPort: {{ .Values.HttpPort }} name: http resources: {{ toYaml .Values.resources | indent 10 }} @@ -196,10 +196,10 @@ spec: - metadata: name: datadir annotations: - volume.alpha.kubernetes.io/storage-class: "{{.Values.StorageClass}}" + volume.alpha.kubernetes.io/storage-class: "{{ .Values.StorageClass }}" spec: accessModes: - "ReadWriteOnce" resources: requests: - storage: "{{.Values.Storage}}" + storage: "{{ .Values.Storage }}" diff --git a/stable/concourse/Chart.yaml b/stable/concourse/Chart.yaml index 859b51a618ec..d07e6cf74bee 100644 --- a/stable/concourse/Chart.yaml +++ b/stable/concourse/Chart.yaml @@ -1,5 +1,5 @@ name: concourse -version: 0.1.3 +version: 0.2.0 description: Concourse is a simple and scalable CI system. icon: https://avatars1.githubusercontent.com/u/7809479 keywords: diff --git a/stable/concourse/README.md b/stable/concourse/README.md index d3f83487b36b..d986f039d688 100644 --- a/stable/concourse/README.md +++ b/stable/concourse/README.md @@ -53,6 +53,30 @@ Scaling should typically be managed via the `helm upgrade` command, but `Statefu $ kubectl scale statefulset my-release-worker --replicas=3 ``` +### Restarting workers + +If worker pods go down, their persistent volumes are changed, or if you're having other issues with them, you'll need to restart the workers. Concourse workers were designed to be deployed onto infrastructure VMs which are less "ephemeral" than pods, so it isn't good at detecting when a worker goes down and comes back under the same hostname. + +Scale the workers down to 0: + +``` +kubectl scale statefulset concourse-worker --replicas=0 + +``` + +And then `fly workers` until the workers are detected to be `stalled`. Then for each worker +``` +fly prune-worker -w concourse-worker-0 +fly prune-worker -w concourse-worker-1 +... + +``` +And finally + +``` +kubectl scale statefulset concourse-worker --replicas=3 +``` + ## Configuration The following tables lists the configurable parameters of the Concourse chart and their default values. @@ -110,6 +134,8 @@ The following tables lists the configurable parameters of the Concourse chart an | `persistence.worker.class` | Concourse Worker Persistent Volume Storage Class | `generic` | | `persistence.worker.accessMode` | Concourse Worker Persistent Volume Access Mode | `ReadWriteOnce` | | `persistence.worker.size` | Concourse Worker Persistent Volume Storage Size | `10Gi` | +| `postgresql.enabled` | Enable PostgreSQL as a chart dependency | `true` | +| `postgresql.uri` | PostgreSQL connection URI | `nil` | | `postgresql.postgresUser` | PostgreSQL User to create | `concourse` | | `postgresql.postgresPassword` | PostgreSQL Password for the new user | `concourse` | | `postgresql.postgresDatabase` | PostgreSQL Database to create | `concourse` | @@ -244,3 +270,26 @@ web: hosts: - concourse.domain.com ``` + + +### PostgreSQL + +By default, this chart will use a PostgreSQL database deployed as a chart dependency. You can also bring your own PostgreSQL. To do so, set the following in your custom `values.yaml` file: + +```yaml +## Configuration values for the postgresql dependency. 
+## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md +## +postgresql: + + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + ## + enabled: false + + ## If bringing your own PostgreSQL, the full uri to use + ## e.g. postgres://concourse:changeme@my-postgres.com:5432/concourse?sslmode=require + ## + uri: postgres://concourse:changeme@my-postgres.com:5432/concourse?sslmode=require + +``` diff --git a/stable/concourse/requirements.yaml b/stable/concourse/requirements.yaml index 8e2aa4661dde..3fcbb5423e6e 100644 --- a/stable/concourse/requirements.yaml +++ b/stable/concourse/requirements.yaml @@ -2,3 +2,4 @@ dependencies: - name: postgresql version: 0.3.0 repository: https://kubernetes-charts.storage.googleapis.com/ + condition: postgresql.enabled diff --git a/stable/concourse/templates/secrets.yaml b/stable/concourse/templates/secrets.yaml index 3205fac9a331..0178ad946a63 100644 --- a/stable/concourse/templates/secrets.yaml +++ b/stable/concourse/templates/secrets.yaml @@ -9,7 +9,11 @@ metadata: heritage: "{{ .Release.Service }}" type: Opaque data: + {{ if .Values.postgresql.enabled }} postgresql-user: {{ .Values.postgresql.postgresUser | b64enc | quote }} + {{ else }} + postgresql-uri: {{ .Values.postgresql.uri | b64enc | quote }} + {{ end }} basic-auth-username: {{ .Values.concourse.username | b64enc | quote }} basic-auth-password: {{ .Values.concourse.password | b64enc | quote }} host-key: {{ .Values.concourse.hostKey | b64enc | quote }} diff --git a/stable/concourse/templates/web-deployment.yaml b/stable/concourse/templates/web-deployment.yaml index 597eb19f4568..9e14bac36f2b 100644 --- a/stable/concourse/templates/web-deployment.yaml +++ b/stable/concourse/templates/web-deployment.yaml @@ -21,6 +21,7 @@ spec: args: - "web" env: + {{ if .Values.postgresql.enabled }} - name: POSTGRES_HOST valueFrom: configMapKeyRef: @@ -43,6 +44,13 @@ spec: key: postgresql-database - name: CONCOURSE_POSTGRES_DATA_SOURCE value: postgres://$(POSTGRES_USER):$(POSTGRES_PASSWORD)@$(POSTGRES_HOST)/$(POSTGRES_DATABASE)?sslmode=disable + {{ else }} + - name: CONCOURSE_POSTGRES_DATA_SOURCE + valueFrom: + secretKeyRef: + name: {{ template "concourse.fullname" . }} + key: postgresql-uri + {{ end }} - name: POD_IP valueFrom: fieldRef: diff --git a/stable/concourse/values.yaml b/stable/concourse/values.yaml index 44e0756c51a5..25806aac8dfd 100644 --- a/stable/concourse/values.yaml +++ b/stable/concourse/values.yaml @@ -350,6 +350,17 @@ persistence: ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md ## postgresql: + + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + ## + enabled: true + + ## If bringing your own PostgreSQL, the full uri to use + ## e.g. postgres://concourse:changeme@my-postgres.com:5432/concourse?sslmode=disable + ## + # uri: + ### PostgreSQL User to create. 
## postgresUser: concourse diff --git a/stable/consul/templates/consul-test.yaml b/stable/consul/templates/consul-test.yaml index b3129b3b80f0..293aa73b9249 100644 --- a/stable/consul/templates/consul-test.yaml +++ b/stable/consul/templates/consul-test.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Pod metadata: - name: "{{.Release.Name}}-ui-test-{{ randAlphaNum 5 | lower }}" + name: "{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}" annotations: "helm.sh/hook": test-success "pod.beta.kubernetes.io/init-containers": '[ @@ -20,8 +20,8 @@ metadata: ]' spec: containers: - - name: {{.Release.Name}}-ui-test - image: {{.Values.test.image}}:{{.Values.test.imageTag}} + - name: {{ .Release.Name }}-ui-test + image: {{ .Values.test.image }}:{{ .Values.test.imageTag }} command: ["/tools/bats/bats", "-t", "/tests/run.sh"] volumeMounts: - mountPath: /tests diff --git a/stable/consul/templates/consul.yaml b/stable/consul/templates/consul.yaml index 39d6f1da525a..450e1841f6f2 100644 --- a/stable/consul/templates/consul.yaml +++ b/stable/consul/templates/consul.yaml @@ -12,17 +12,17 @@ kind: Service metadata: name: "{{ template "fullname" . }}-ui" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: ports: - name: http - port: {{.Values.HttpPort}} + port: {{ .Values.HttpPort }} selector: - component: "{{.Release.Name}}-{{.Values.Component}}" - type: "{{.Values.uiService.type}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" + type: "{{ .Values.uiService.type }}" {{- end }} --- apiVersion: v1 @@ -30,58 +30,58 @@ kind: Service metadata: name: "{{ template "fullname" . }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" spec: ports: - name: http - port: {{.Values.HttpPort}} + port: {{ .Values.HttpPort }} - name: rpc - port: {{.Values.RpcPort}} + port: {{ .Values.RpcPort }} - name: serflan-tcp protocol: "TCP" - port: {{.Values.SerflanPort}} + port: {{ .Values.SerflanPort }} - name: serflan-udp protocol: "UDP" - port: {{.Values.SerflanUdpPort}} + port: {{ .Values.SerflanUdpPort }} - name: serfwan-tcp protocol: "TCP" - port: {{.Values.SerfwanPort}} + port: {{ .Values.SerfwanPort }} - name: serfwan-udp protocol: "UDP" - port: {{.Values.SerfwanUdpPort}} + port: {{ .Values.SerfwanUdpPort }} - name: server - port: {{.Values.ServerPort}} + port: {{ .Values.ServerPort }} - name: consuldns - port: {{.Values.ConsulDnsPort}} + port: {{ .Values.ConsulDnsPort }} clusterIP: None selector: - component: "{{.Release.Name}}-{{.Values.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" --- apiVersion: apps/v1beta1 kind: StatefulSet metadata: name: "{{ template "fullname" . 
}}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" spec: serviceName: "{{ template "fullname" . }}" - replicas: {{default 3 .Values.Replicas}} + replicas: {{ default 3 .Values.Replicas }} template: metadata: name: "{{ template "fullname" . }}" labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Component }}" annotations: pod.alpha.kubernetes.io/initialized: "true" spec: @@ -89,33 +89,33 @@ spec: fsGroup: 1000 containers: - name: "{{ template "fullname" . }}" - image: "{{.Values.Image}}:{{.Values.ImageTag}}" - imagePullPolicy: "{{.Values.ImagePullPolicy}}" + image: "{{ .Values.Image }}:{{ .Values.ImageTag }}" + imagePullPolicy: "{{ .Values.ImagePullPolicy }}" ports: - name: http - containerPort: {{.Values.HttpPort}} + containerPort: {{ .Values.HttpPort }} - name: rpc - containerPort: {{.Values.RpcPort}} + containerPort: {{ .Values.RpcPort }} - name: serflan-tcp protocol: "TCP" - containerPort: {{.Values.SerflanPort}} + containerPort: {{ .Values.SerflanPort }} - name: serflan-udp protocol: "UDP" - containerPort: {{.Values.SerflanUdpPort}} + containerPort: {{ .Values.SerflanUdpPort }} - name: serfwan-tcp protocol: "TCP" - containerPort: {{.Values.SerfwanPort}} + containerPort: {{ .Values.SerfwanPort }} - name: serfwan-udp protocol: "UDP" - containerPort: {{.Values.SerfwanUdpPort}} + containerPort: {{ .Values.SerfwanUdpPort }} - name: server - containerPort: {{.Values.ServerPort}} + containerPort: {{ .Values.ServerPort }} - name: consuldns - containerPort: {{.Values.ConsulDnsPort}} + containerPort: {{ .Values.ConsulDnsPort }} resources: requests: - cpu: "{{.Values.Cpu}}" - memory: "{{.Values.Memory}}" + cpu: "{{ .Values.Cpu }}" + memory: "{{ .Values.Memory }}" env: - name: INITIAL_CLUSTER_SIZE value: {{ default 3 .Values.Replicas | quote }} @@ -193,7 +193,7 @@ spec: name: datadir annotations: {{- if .Values.StorageClass }} - volume.beta.kubernetes.io/storage-class: {{.Values.StorageClass | quote}} + volume.beta.kubernetes.io/storage-class: {{ .Values.StorageClass | quote }} {{- else }} volume.alpha.kubernetes.io/storage-class: default {{- end }} @@ -203,4 +203,4 @@ spec: resources: requests: # upstream recommended max is 700M - storage: "{{.Values.Storage}}" + storage: "{{ .Values.Storage }}" diff --git a/stable/consul/templates/test-config.yaml b/stable/consul/templates/test-config.yaml index 4a8ae7a0055c..deb42e05524c 100644 --- a/stable/consul/templates/test-config.yaml +++ b/stable/consul/templates/test-config.yaml @@ -6,11 +6,11 @@ data: run.sh: |- @test "Testing Consul cluster has quorum" { for i in {0..2}; do - if [ `kubectl exec {{.Release.Name}}-consul-$i consul members --namespace={{.Release.Namespace}} | grep server | wc -l` -ge "3" ]; then - echo "{{.Release.Name}}-consul-$i OK. consul members returning at least 3 records." 
+ if [ `kubectl exec {{ .Release.Name }}-consul-$i consul members --namespace={{ .Release.Namespace }} | grep server | wc -l` -ge "3" ]; then + echo "{{ .Release.Name }}-consul-$i OK. consul members returning at least 3 records." else - echo "{{.Release.Name}}-consul-$i ERROR. consul members returning less than 3 records." + echo "{{ .Release.Name }}-consul-$i ERROR. consul members returning less than 3 records." exit 1 fi done - } \ No newline at end of file + } diff --git a/stable/dask-distributed/.helmignore b/stable/dask-distributed/.helmignore new file mode 100644 index 000000000000..f0c131944441 --- /dev/null +++ b/stable/dask-distributed/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/stable/dask-distributed/Chart.yaml b/stable/dask-distributed/Chart.yaml new file mode 100755 index 000000000000..66f8924111d6 --- /dev/null +++ b/stable/dask-distributed/Chart.yaml @@ -0,0 +1,10 @@ +name: dask-distributed +version: 1.16.3 +description: Distributed computation in Python +home: https://github.com/dask/distributed +icon: https://avatars3.githubusercontent.com/u/17131925?v=3&s=200 +sources: + - https://github.com/dask/distributed +maintainers: + - name: danielfrg + email: df.rodriguez143@gmail.com diff --git a/stable/dask-distributed/README.md b/stable/dask-distributed/README.md new file mode 100644 index 000000000000..9413e0e97e0a --- /dev/null +++ b/stable/dask-distributed/README.md @@ -0,0 +1,83 @@ +# Dask Distributed Helm Chart + +Dask Distributed allows distributed computation in Python. The chart also includes a single-user Jupyter Notebook. + +* https://github.com/dask/distributed +* http://jupyter.org/ + +## Chart Details +This chart will do the following: + +* 1 x Dask scheduler with port 8786 (scheduler) and 80 (Web UI) exposed on an external LoadBalancer +* 3 x Dask workers that connect to the scheduler +* 1 x Jupyter notebook with port 80 exposed on an external LoadBalancer +* All using Kubernetes Deployments + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/dask-distributed +``` + +## Configuration + +The following tables list the configurable parameters of the Dask chart and their default values. 
+ +### Dask scheduler + +| Parameter | Description | Default | +| -------------------------- | ---------------------------------- | ---------------------------------------------------------- | +| `scheduler.name` | Dask scheduler name | `scheduler` | +| `scheduler.image` | Container image name | `daskdev/dask` | +| `scheduler.imageTag` | Container image tag | `latest` | +| `scheduler.replicas` | k8s deployment replicas | `1` | +| `scheduler.component` | k8s selector key | `dask-scheduler` | +| `scheduler.cpu` | container requested cpu | `500m` | +| `scheduler.containerPort` | Container listening port | `8786` | +| `scheduler.resources` | Container resources | `{}` | + +### Dask webUI + +| Parameter | Description | Default | +|-----------------------|----------------------------------|----------------------------------------------------------| +| `webUI.name` | Dask webUI name | `webui` | +| `webUI.servicePort` | k8s service port | `80` | +| `webUI.containerPort` | Container listening port | `8787` | + +### Dask worker + +| Parameter | Description | Default | +| ----------------------- | ------------------------------------ | ---------------------------------------------------------- | +| `worker.name` | Dask worker name | `worker` | +| `worker.image` | Container image name | `daskdev/dask` | +| `worker.imageTag` | Container image tag | `latest` | +| `worker.replicas` | k8s hpa and deployment replicas | `3` | +| `worker.replicasMax` | k8s hpa max replicas | `10` | +| `worker.component` | k8s selector key | `dask-worker` | +| `worker.containerPort` | Container listening port | `8081` | +| `worker.resources` | Container resources | `{}` | + +### Jupyter + +| Parameter | Description | Default | +|-------------------------|----------------------------------|----------------------------------------------------------| +| `jupyter.name` | Jupyter name | `jupyter` | +| `jupyter.image` | Container image name | `jupyter/base-notebook` | +| `jupyter.imageTag` | Container image tag | `11be019e4079` | +| `jupyter.replicas` | k8s deployment replicas | `1` | +| `jupyter.component` | k8s selector key | `jupyter-notebook` | +| `jupyter.servicePort` | k8s service port | `80` | +| `jupyter.containerPort` | Container listening port | `8888` | +| `jupyter.resources` | Container resources | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/dask-distributed +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/stable/dask-distributed/templates/NOTES.txt b/stable/dask-distributed/templates/NOTES.txt new file mode 100644 index 000000000000..e366356dc4b7 --- /dev/null +++ b/stable/dask-distributed/templates/NOTES.txt @@ -0,0 +1,25 @@ +1. Get the Dask Scheduler connection string by running these commands in the same shell: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available; until then the commands below will not work. + You can watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "scheduler-fullname" . }}' + + export DASK_SCHEDULER=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "scheduler-fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$DASK_SCHEDULER:{{ .Values.scheduler.servicePort }} + +2. 
Get the Dask Scheduler Web UI by running these commands in the same shell: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available; until then the commands below will not work. + You can watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "scheduler-fullname" . }}' + + export DASK_SCHEDULER_UI_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "scheduler-fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$DASK_SCHEDULER_UI_IP:{{ .Values.webUI.servicePort }} + +3. Get the Jupyter Notebook URL to visit by running these commands in the same shell: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available; until then the commands below will not work. + You can watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "jupyter-fullname" . }}' + + NOTE: The default password to log in is `dask`. + + export JUPYTER_NOTEBOOK_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "jupyter-fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$JUPYTER_NOTEBOOK_IP:{{ .Values.jupyter.servicePort }} diff --git a/stable/dask-distributed/templates/_helpers.tpl b/stable/dask-distributed/templates/_helpers.tpl new file mode 100644 index 000000000000..f3d10f775f37 --- /dev/null +++ b/stable/dask-distributed/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 24 -}} +{{- end -}} + +{{/* +Create fully qualified names. +We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "scheduler-fullname" -}} +{{- $name := default .Chart.Name .Values.scheduler.name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} +{{- end -}} + +{{- define "webui-fullname" -}} +{{- $name := default .Chart.Name .Values.webUI.name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} +{{- end -}} + +{{- define "worker-fullname" -}} +{{- $name := default .Chart.Name .Values.worker.name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} +{{- end -}} + +{{- define "jupyter-fullname" -}} +{{- $name := default .Chart.Name .Values.jupyter.name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 24 -}} +{{- end -}} diff --git a/stable/dask-distributed/templates/dask-jupyter-config.yaml b/stable/dask-distributed/templates/dask-jupyter-config.yaml new file mode 100644 index 000000000000..c241247f8ace --- /dev/null +++ b/stable/dask-distributed/templates/dask-jupyter-config.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "jupyter-fullname" . }}-config + labels: + app: {{ template "name" . 
}} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.jupyter.component }}" +data: + jupyter_notebook_config.py: | + c = get_config() + c.NotebookApp.password = '{{ .Values.jupyter.password }}' diff --git a/stable/dask-distributed/templates/dask-jupyter-deployment.yaml b/stable/dask-distributed/templates/dask-jupyter-deployment.yaml new file mode 100644 index 000000000000..907c603929da --- /dev/null +++ b/stable/dask-distributed/templates/dask-jupyter-deployment.yaml @@ -0,0 +1,35 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "jupyter-fullname" . }} + labels: + app: {{ template "name" . }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.jupyter.component }}" +spec: + replicas: {{ .Values.jupyter.replicas }} + strategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ template "name" . }} + release: {{ .Release.Name | quote }} + component: "{{ .Release.Name }}-{{ .Values.jupyter.component }}" + spec: + containers: + - name: {{ template "jupyter-fullname" . }} + image: "{{ .Values.jupyter.image }}:{{ .Values.jupyter.imageTag }}" + ports: + - containerPort: {{ .Values.jupyter.containerPort }} + resources: +{{ toYaml .Values.jupyter.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /home/jovyan/.jupyter + volumes: + - name: config-volume + configMap: + name: {{ template "jupyter-fullname" . }}-config diff --git a/stable/dask-distributed/templates/dask-jupyter-service.yaml b/stable/dask-distributed/templates/dask-jupyter-service.yaml new file mode 100644 index 000000000000..993ef29c9d12 --- /dev/null +++ b/stable/dask-distributed/templates/dask-jupyter-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jupyter-fullname" . }} + labels: + app: {{ template "name" . }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.jupyter.component }}" +spec: + ports: + - port: {{ .Values.jupyter.servicePort }} + targetPort: {{ .Values.jupyter.containerPort }} + selector: + app: {{ template "name" . }} + release: {{ .Release.Name | quote }} + component: "{{ .Release.Name }}-{{ .Values.jupyter.component }}" + type: "LoadBalancer" diff --git a/stable/dask-distributed/templates/dask-scheduler-deployment.yaml b/stable/dask-distributed/templates/dask-scheduler-deployment.yaml new file mode 100644 index 000000000000..ee7427952318 --- /dev/null +++ b/stable/dask-distributed/templates/dask-scheduler-deployment.yaml @@ -0,0 +1,30 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "scheduler-fullname" . }} + labels: + app: {{ template "name" . }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.scheduler.component }}" +spec: + replicas: {{ .Values.scheduler.replicas }} + strategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ template "name" . }} + release: {{ .Release.Name | quote }} + component: "{{ .Release.Name }}-{{ .Values.scheduler.component }}" + spec: + containers: + - name: {{ template "scheduler-fullname" . 
}} + image: "{{ .Values.scheduler.image }}:{{ .Values.scheduler.imageTag }}" + command: ["dask-scheduler", "--port", "{{ .Values.scheduler.servicePort }}", "--bokeh-port", "{{ .Values.webUI.containerPort }}"] + ports: + - containerPort: {{ .Values.scheduler.containerPort }} + - containerPort: {{ .Values.webUI.containerPort }} + resources: +{{ toYaml .Values.scheduler.resources | indent 12 }} diff --git a/stable/dask-distributed/templates/dask-scheduler-service.yaml b/stable/dask-distributed/templates/dask-scheduler-service.yaml new file mode 100644 index 000000000000..df0559429b08 --- /dev/null +++ b/stable/dask-distributed/templates/dask-scheduler-service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "scheduler-fullname" . }} + labels: + app: {{ template "name" . }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.scheduler.component }}" +spec: + ports: + - name: {{ template "scheduler-fullname" . }} + port: {{ .Values.scheduler.servicePort }} + targetPort: {{ .Values.scheduler.containerPort }} + - name: {{ template "webui-fullname" . }} + port: {{ .Values.webUI.servicePort }} + targetPort: {{ .Values.webUI.containerPort }} + selector: + app: {{ template "name" . }} + release: {{ .Release.Name | quote }} + component: "{{ .Release.Name }}-{{ .Values.scheduler.component }}" + type: "LoadBalancer" diff --git a/stable/dask-distributed/templates/dask-worker-deployment.yaml b/stable/dask-distributed/templates/dask-worker-deployment.yaml new file mode 100644 index 000000000000..f397b63344f4 --- /dev/null +++ b/stable/dask-distributed/templates/dask-worker-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "worker-fullname" . }} + labels: + app: {{ template "name" . }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.worker.component }}" +spec: + replicas: {{ .Values.worker.replicas }} + strategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ template "name" . }} + release: {{ .Release.Name | quote }} + component: "{{ .Release.Name }}-{{ .Values.worker.component }}" + spec: + containers: + - name: {{ template "worker-fullname" . }} + image: "{{ .Values.worker.image }}:{{ .Values.worker.imageTag }}" + command: ["dask-worker", "{{ template "scheduler-fullname" . }}:{{ .Values.scheduler.servicePort }}"] + ports: + - containerPort: {{ .Values.worker.containerPort }} + resources: +{{ toYaml .Values.worker.resources | indent 12 }} diff --git a/stable/dask-distributed/values.yaml b/stable/dask-distributed/values.yaml new file mode 100644 index 000000000000..0910b58bc582 --- /dev/null +++ b/stable/dask-distributed/values.yaml @@ -0,0 +1,59 @@ +# Default values for dask. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. 
+# name: value + +# nameOverride: dask + +scheduler: + name: scheduler + image: "daskdev/dask" + imageTag: "latest" + replicas: 1 + component: "dask-scheduler" + servicePort: 8786 + containerPort: 8786 + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + +webUI: + name: webui + servicePort: 80 + containerPort: 8787 + +worker: + name: worker + image: "daskdev/dask" + imageTag: "latest" + replicas: 3 + component: "dask-worker" + containerPort: 8081 + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + +jupyter: + name: jupyter + image: "jupyter/base-notebook" + imageTag: "11be019e4079" + replicas: 1 + component: "jupyter-notebook" + servicePort: 80 + containerPort: 8888 + password: 'sha1:aae8550c0a44:9507d45e087d5ee481a5ce9f4f16f37a0867318c' # 'dask' + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi diff --git a/stable/drupal/Chart.yaml b/stable/drupal/Chart.yaml index 0a7cdd149a16..1aad2931d2a1 100644 --- a/stable/drupal/Chart.yaml +++ b/stable/drupal/Chart.yaml @@ -1,5 +1,5 @@ name: drupal -version: 0.7.2 +version: 0.8.0 appVersion: 8.3.3 description: One of the most versatile open source content management systems. keywords: diff --git a/stable/drupal/README.md b/stable/drupal/README.md index 324723bb5c95..99261527d324 100644 --- a/stable/drupal/README.md +++ b/stable/drupal/README.md @@ -69,6 +69,8 @@ The following tables lists the configurable parameters of the Drupal chart and t | `persistence.drupal.hostPath` | Host mount path for Drupal volume | `nil` (will not mount to a host path) | | `persistence.drupal.size` | PVC Storage Request for Drupal volume | `8Gi` | | `resources` | CPU/Memory resource requests/limits | Memory: `512Mi`, CPU: `300m` | +| `volumeMounts.drupal.mountPath` | Drupal data volume mount path | `/bitnami/drupal` | +| `volumeMounts.apache.mountPath` | Apache data volume mount path | `/bitnami/apache` | The above parameters map to the env variables defined in [bitnami/drupal](http://github.com/bitnami/bitnami-docker-drupal). For more information please refer to the [bitnami/drupal](http://github.com/bitnami/bitnami-docker-drupal) image documentation. @@ -110,8 +112,9 @@ helm install --name my-release -f values.yaml stable/drupal ``` ## Persistence +The configured image must store Drupal data and Apache configurations in separate paths of the container. -The [Bitnami Drupal](https://github.com/bitnami/bitnami-docker-drupal) image stores the Drupal data and configurations at the `/bitnami/drupal` and `/bitnami/apache` paths of the container. +The [Bitnami Drupal](https://github.com/bitnami/bitnami-docker-drupal) image stores the Drupal data and Apache configurations at the `/bitnami/drupal` and `/bitnami/apache` paths of the container. If you wish to override the `image` value, and your image stores this data and configurations in different paths, you may specify these paths with `volumeMounts.drupal.mountPath` and `volumeMounts.apache.mountPath`. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Configuration](#configuration) section to configure the PVC or to disable persistence. 
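For illustration, a custom values file that remaps both Drupal mount paths for such an image might look like the following sketch; the `/data/...` paths are hypothetical and must match wherever the overridden image actually writes:

```yaml
# Hypothetical override for an image that keeps Drupal and Apache data under /data
volumeMounts:
  drupal:
    mountPath: /data/drupal
  apache:
    mountPath: /data/apache
```

Setting `volumeMounts.apache.mountPath` to `""` skips the Apache volume mount and its PersistentVolumeClaim entirely, since the templates above only render them when that value is non-empty.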
diff --git a/stable/drupal/requirements.lock b/stable/drupal/requirements.lock index a7fbe2b0916f..dfa7bccb1dd4 100644 --- a/stable/drupal/requirements.lock +++ b/stable/drupal/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:39.649401832-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:45.769748804-04:00 diff --git a/stable/drupal/requirements.yaml b/stable/drupal/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/drupal/requirements.yaml +++ b/stable/drupal/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/drupal/templates/apache-pvc.yaml b/stable/drupal/templates/apache-pvc.yaml index 166feb8b9f47..db3bc7fdd066 100644 --- a/stable/drupal/templates/apache-pvc.yaml +++ b/stable/drupal/templates/apache-pvc.yaml @@ -1,4 +1,4 @@ -{{- if .Values.persistence.enabled -}} +{{- if and .Values.persistence.enabled .Values.volumeMounts.apache.mountPath -}} kind: PersistentVolumeClaim apiVersion: v1 metadata: diff --git a/stable/drupal/templates/deployment.yaml b/stable/drupal/templates/deployment.yaml index a4477d2854a0..14be58a91c66 100644 --- a/stable/drupal/templates/deployment.yaml +++ b/stable/drupal/templates/deployment.yaml @@ -56,9 +56,11 @@ spec: {{ toYaml .Values.resources | indent 10 }} volumeMounts: - name: drupal-data - mountPath: /bitnami/drupal + mountPath: {{ .Values.volumeMounts.drupal.mountPath }} + {{- if .Values.volumeMounts.apache.mountPath }} - name: apache-data - mountPath: /bitnami/apache + mountPath: {{ .Values.volumeMounts.apache.mountPath }} + {{- end }} {{- if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 8 }} @@ -71,6 +73,7 @@ spec: {{- else }} emptyDir: {} {{- end }} + {{- if .Values.volumeMounts.apache.mountPath }} - name: apache-data {{- if .Values.persistence.enabled }} persistentVolumeClaim: @@ -78,3 +81,4 @@ spec: {{- else }} emptyDir: {} {{- end }} + {{- end }} diff --git a/stable/drupal/values.yaml b/stable/drupal/values.yaml index 63e2fc8e6dc6..316cd04546a4 100644 --- a/stable/drupal/values.yaml +++ b/stable/drupal/values.yaml @@ -120,3 +120,13 @@ resources: requests: memory: 512Mi cpu: 300m + +## Configure volume mounts. This is useful for images whose data mount paths are +## different than the default. +## Setting volumeMounts.apache.mountPath to "" prevents Apache config mount. 
+## +volumeMounts: + drupal: + mountPath: /bitnami/drupal + apache: + mountPath: /bitnami/apache diff --git a/stable/etcd-operator/Chart.yaml b/stable/etcd-operator/Chart.yaml index 91f3f1760135..44a7f6a5d55e 100755 --- a/stable/etcd-operator/Chart.yaml +++ b/stable/etcd-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 description: CoreOS etcd-operator Helm chart for Kubernetes name: etcd-operator -version: 0.4.0 +version: 0.4.1 appVersion: 0.3.2 home: https://github.com/coreos/etcd-operator icon: https://raw.githubusercontent.com/coreos/etcd/master/logos/etcd-horizontal-color.png diff --git a/stable/etcd-operator/templates/deployment.yaml b/stable/etcd-operator/templates/deployment.yaml index 598229f00c8a..bea9257682a5 100644 --- a/stable/etcd-operator/templates/deployment.yaml +++ b/stable/etcd-operator/templates/deployment.yaml @@ -27,7 +27,11 @@ spec: command: - "/bin/sh" - "-c" - - "/usr/local/bin/etcd-operator --pv-provisioner={{ .Values.cluster.backup.provisioner }}" + - "/usr/local/bin/etcd-operator" + - "--pv-provisioner={{ .Values.cluster.backup.provisioner }}" +{{- range $key, $value := .Values.commandArgs }} + - "--{{ $key }}={{ $value }}" +{{- end }} env: - name: MY_POD_NAMESPACE valueFrom: diff --git a/stable/etcd-operator/values.yaml b/stable/etcd-operator/values.yaml index c8fbdb08539a..3c8f0fba3fa2 100644 --- a/stable/etcd-operator/values.yaml +++ b/stable/etcd-operator/values.yaml @@ -13,6 +13,12 @@ resources: requests: cpu: 100m memory: 128Mi + +## etcd-operator specific values +## additional command arguments go here; will be translated to `--key=value` form +commandArgs: + # analytics: true + ## etcd-cluster specific values cluster: enabled: false diff --git a/stable/factorio/templates/deployment.yaml b/stable/factorio/templates/deployment.yaml index c91224fa6992..3e85be846b64 100644 --- a/stable/factorio/templates/deployment.yaml +++ b/stable/factorio/templates/deployment.yaml @@ -78,7 +78,7 @@ spec: - name: rcon containerPort: {{ .Values.factorioServer.rcon.port }} protocol: TCP - {{- end}} + {{- end }} volumeMounts: - name: saves mountPath: /opt/factorio/saves @@ -88,14 +88,14 @@ spec: - name: saves {{- if .Values.persistence.savedGames.enabled }} persistentVolumeClaim: - claimName: {{template "fullname" .}}-savedgames + claimName: {{ template "fullname" . }}-savedgames {{- else }} emptyDir: {} {{- end }} - name: mods {{- if .Values.persistence.mods.enabled }} persistentVolumeClaim: - claimName: {{template "fullname" .}}-mods + claimName: {{ template "fullname" . 
}}-mods {{- else }} emptyDir: {} {{- end }} diff --git a/stable/fluent-bit/Chart.yaml b/stable/fluent-bit/Chart.yaml index 6207632d88bb..9b0430ac9b4e 100755 --- a/stable/fluent-bit/Chart.yaml +++ b/stable/fluent-bit/Chart.yaml @@ -1,16 +1,18 @@ name: fluent-bit -version: 0.1.0 -appVersion: 0.11.8 +version: 0.1.3 +appVersion: 0.11.12 description: Fast and Lightweight Log/Data Forwarder for Linux, BSD and OSX keywords: -- fluent -- bit + - logging + - monitoring + - fluent + - fluentd sources: -- http://fluentbit.io + - http://fluentbit.io icon: http://fluentbit.io/assets/img/logo1-default.png home: http://fluentbit.io maintainers: -- name: edsiper - email: eduardo@treasure-data.com -- name: kfox1111 - email: Kevin.Fox@pnnl.gov + - name: kfox1111 + email: Kevin.Fox@pnnl.gov + - name: edsiper + email: eduardo@treasure-data.com diff --git a/stable/fluent-bit/templates/daemonset.yaml b/stable/fluent-bit/templates/daemonset.yaml index 23658b84590e..11083435409d 100644 --- a/stable/fluent-bit/templates/daemonset.yaml +++ b/stable/fluent-bit/templates/daemonset.yaml @@ -13,7 +13,7 @@ spec: containers: - name: fluent-bit image: "{{ .Values.image.fluent_bit.repository }}:{{ .Values.image.fluent_bit.tag }}" - imagePullPolicy: "{{ .Values.image.pullPolicy}}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" env: {{ toYaml .Values.env | indent 10 }} resources: diff --git a/stable/fluent-bit/values.yaml b/stable/fluent-bit/values.yaml index 23147c0ecd75..2f6502602ea3 100644 --- a/stable/fluent-bit/values.yaml +++ b/stable/fluent-bit/values.yaml @@ -5,7 +5,7 @@ on_minikube: false image: fluent_bit: repository: fluent/fluent-bit - tag: 0.11.8 + tag: 0.11.12 pullPolicy: IfNotPresent backend: diff --git a/stable/g2/.helmignore b/stable/g2/.helmignore new file mode 100644 index 000000000000..c13e3c8fbb2f --- /dev/null +++ b/stable/g2/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj \ No newline at end of file diff --git a/stable/g2/Chart.yaml b/stable/g2/Chart.yaml new file mode 100755 index 000000000000..91d5650f634a --- /dev/null +++ b/stable/g2/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +description: G2 by AppsCode - Gearman in Golang +name: g2 +version: 0.1.0 +appVersion: 0.5.0 +home: https://github.com/appscode/g2 +icon: https://cdn.appscode.com/images/icon/g2.png +sources: + - https://github.com/appscode/g2 +maintainers: + - name: appscode + email: support@appscode.com diff --git a/stable/g2/README.md b/stable/g2/README.md new file mode 100644 index 000000000000..d92069607e6a --- /dev/null +++ b/stable/g2/README.md @@ -0,0 +1,97 @@ +# G2 +[G2 by AppsCode](https://github.com/appscode/g2) is a modern implementation of Gearman server in GO. +## TL;DR; + +```bash +$ helm install stable/g2 +``` + +## Introduction + +This chart bootstraps a [Gearman server](https://github.com/appscode/g2) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + + +## Prerequisites + +- Kubernetes 1.3+ + +## Installing the Chart +To install the chart with the release name `my-release`: +```bash +$ helm install --name my-release stable/g2 +``` +The command deploys G2 Gearman server on the Kubernetes cluster in the default configuration. 
The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release`: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the G2 chart and their default values. + + +| Parameter | Description | Default | +| ------------------------ | ----------------------------------------------------------------- | ------------------- | +| `replicaCount` | Number of Gearman server replicas to create | `1` | +| `g2.image` | G2 container image | `appscode/gearmand` | +| `g2.tag` | G2 container image tag | `0.5.0` | +| `g2.pullPolicy` | G2 container image pull policy | `IfNotPresent` | +| `g2.serviceType` | G2 service type | `ClusterIP` | +| `rbac.install` | Install required RBAC service account, roles and rolebindings | `false` | +| `rbac.apiVersion` | RBAC API version (`v1alpha1` or `v1beta1`) | `v1beta1` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```bash +$ helm install --name my-release --set g2.tag=0.5.0 stable/g2 +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```bash +$ helm install --name my-release --values values.yaml stable/g2 +``` + +## RBAC +By default, the chart will not install the recommended RBAC roles and rolebindings. + +You need to have the following flag set on the API server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/): + +``` +--authorization-mode=RBAC +``` + +To determine if your cluster supports RBAC, run the following command: + +```console +$ kubectl api-versions | grep rbac +``` + +If the output contains "alpha" and/or "beta", you may install the chart with RBAC enabled (see below). + +### Enable RBAC role/rolebinding creation + +To enable the creation of RBAC resources (on clusters with RBAC), do the following: + +```console +$ helm install --name my-release stable/g2 --set rbac.install=true +``` + +### Changing RBAC manifest apiVersion + +By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1", do the following: + +```console +$ helm install --name my-release stable/g2 --set rbac.install=true,rbac.apiVersion=v1alpha1 +``` diff --git a/stable/g2/templates/NOTES.txt b/stable/g2/templates/NOTES.txt new file mode 100644 index 000000000000..14ba91612be4 --- /dev/null +++ b/stable/g2/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that G2 has started, run: + + kubectl --namespace={{ .Release.Namespace }} get deployments -l "release={{ .Release.Name }}, app={{ template "name" . }}" diff --git a/stable/g2/templates/_helpers.tpl b/stable/g2/templates/_helpers.tpl new file mode 100644 index 000000000000..f0d83d2edba6 --- /dev/null +++ b/stable/g2/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/g2/templates/cluster-role-binding.yaml b/stable/g2/templates/cluster-role-binding.yaml new file mode 100644 index 000000000000..0ef44d37c418 --- /dev/null +++ b/stable/g2/templates/cluster-role-binding.yaml @@ -0,0 +1,20 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . -}} +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +kind: ClusterRoleBinding +metadata: + name: {{ $serviceName }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "fullname" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +subjects: +- kind: ServiceAccount + name: {{ $serviceName }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $serviceName }} +{{ end }} diff --git a/stable/g2/templates/cluster-role.yaml b/stable/g2/templates/cluster-role.yaml new file mode 100644 index 000000000000..cd16b1f459a0 --- /dev/null +++ b/stable/g2/templates/cluster-role.yaml @@ -0,0 +1,26 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . -}} +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +kind: ClusterRole +metadata: + name: {{ $serviceName }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "fullname" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +rules: +- apiGroups: + - extensions + resources: + - deployments + verbs: ["get", "create", "update"] +- apiGroups: [""] + resources: + - services + verbs: ["create", "update"] +- apiGroups: [""] + resources: + - pods + verbs: ["list", "create", "update", "delete"] +{{ end }} diff --git a/stable/g2/templates/deployment.yaml b/stable/g2/templates/deployment.yaml new file mode 100644 index 000000000000..794244523bb5 --- /dev/null +++ b/stable/g2/templates/deployment.yaml @@ -0,0 +1,41 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + spec: +{{- if .Values.rbac.install }} + serviceAccountName: {{ template "fullname" . }} +{{- end }} + containers: + - args: + - run + - --storage-dir=/var/db + - --v=5 + image: '{{ .Values.g2.image }}:{{ .Values.g2.tag }}' + imagePullPolicy: '{{ .Values.g2.pullPolicy }}' + name: gearman-server + ports: + - containerPort: 4730 + name: http + protocol: TCP + - containerPort: 3000 + name: restapi + protocol: TCP + volumeMounts: + - mountPath: /var/db + name: data-volume + volumes: + - emptyDir: {} + name: data-volume diff --git a/stable/g2/templates/service-account.yaml b/stable/g2/templates/service-account.yaml new file mode 100644 index 000000000000..8ff9087648f2 --- /dev/null +++ b/stable/g2/templates/service-account.yaml @@ -0,0 +1,11 @@ +{{ if .Values.rbac.install }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "fullname" . 
}} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "fullname" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +{{ end }} diff --git a/stable/g2/templates/service.yaml b/stable/g2/templates/service.yaml new file mode 100644 index 000000000000..30792873949d --- /dev/null +++ b/stable/g2/templates/service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app: "{{ template "name" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + ports: + - name: http + port: 4730 + protocol: TCP + targetPort: http + - name: restapi + port: 3000 + protocol: TCP + targetPort: restapi + selector: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + type: '{{ .Values.g2.serviceType }}' diff --git a/stable/g2/values.yaml b/stable/g2/values.yaml new file mode 100644 index 000000000000..8bb2adf8cb47 --- /dev/null +++ b/stable/g2/values.yaml @@ -0,0 +1,9 @@ +replicaCount: 1 +g2: + image: appscode/gearmand + pullPolicy: IfNotPresent + tag: 0.5.0 + serviceType: ClusterIP +rbac: + install: false + apiVersion: v1beta1 diff --git a/stable/ghost/Chart.yaml b/stable/ghost/Chart.yaml index f0c2dc8d6226..9f85417cdafd 100644 --- a/stable/ghost/Chart.yaml +++ b/stable/ghost/Chart.yaml @@ -1,5 +1,5 @@ name: ghost -version: 0.4.10 +version: 0.4.11 description: A simple, powerful publishing platform that allows you to share your stories with the world keywords: - ghost diff --git a/stable/ghost/requirements.lock b/stable/ghost/requirements.lock index 7e4de35714e4..d6cee15b07e8 100644 --- a/stable/ghost/requirements.lock +++ b/stable/ghost/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:40.081218709-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:46.292611341-04:00 diff --git a/stable/ghost/requirements.yaml b/stable/ghost/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/ghost/requirements.yaml +++ b/stable/ghost/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/ghost/templates/NOTES.txt b/stable/ghost/templates/NOTES.txt index 632f42418b98..51bda81893f0 100644 --- a/stable/ghost/templates/NOTES.txt +++ b/stable/ghost/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure Ghost with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") {{- end }} 2. 
Complete your Ghost deployment by running: diff --git a/stable/grafana/templates/deployment.yaml b/stable/grafana/templates/deployment.yaml index bf37853d8625..82ded927977c 100644 --- a/stable/grafana/templates/deployment.yaml +++ b/stable/grafana/templates/deployment.yaml @@ -3,7 +3,7 @@ kind: Deployment metadata: labels: app: {{ template "grafana.fullname" . }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: "{{ .Values.server.name }}" heritage: "{{ .Release.Service }}" release: "{{ .Release.Name }}" diff --git a/stable/grafana/templates/svc.yaml b/stable/grafana/templates/svc.yaml index a2e64c48ed4d..1334efec77e1 100644 --- a/stable/grafana/templates/svc.yaml +++ b/stable/grafana/templates/svc.yaml @@ -27,7 +27,7 @@ spec: {{- if .Values.server.loadBalancerIP }} loadBalancerIP: {{ .Values.server.loadBalancerIP }} {{- end -}} - {{- if .Values.server.loadBalancerSourceRanges}} + {{- if .Values.server.loadBalancerSourceRanges }} loadBalancerSourceRanges: {{- range .Values.server.loadBalancerSourceRanges }} - {{ . }} diff --git a/stable/heapster/Chart.yaml b/stable/heapster/Chart.yaml index ee0657bb7bca..c2e7d9ee5bd1 100644 --- a/stable/heapster/Chart.yaml +++ b/stable/heapster/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 description: Heapster enables Container Cluster Monitoring and Performance Analysis. name: heapster -version: 0.1.0 +version: 0.1.1 sources: - https://github.com/kubernetes/heapster - https://github.com/kubernetes/contrib/tree/master/addon-resizer diff --git a/stable/heapster/templates/_helpers.tpl b/stable/heapster/templates/_helpers.tpl index afb73b4075cf..a75a8d5268ca 100644 --- a/stable/heapster/templates/_helpers.tpl +++ b/stable/heapster/templates/_helpers.tpl @@ -20,6 +20,5 @@ Create a service name that defaults to app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). */}} {{- define "service.fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- .Values.service.nameOverride | default (printf "%s-%s" .Release.Name $name) | trunc 63 | trimSuffix "-" -}} +{{- .Values.service.nameOverride | default .Chart.Name }} {{- end -}} diff --git a/stable/heapster/templates/deployment.yaml b/stable/heapster/templates/deployment.yaml index 3544550deca3..020bcde75e73 100644 --- a/stable/heapster/templates/deployment.yaml +++ b/stable/heapster/templates/deployment.yaml @@ -4,6 +4,9 @@ metadata: name: {{ template "fullname" . }} labels: chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value }} + {{- end }} spec: replicas: {{ .Values.replicaCount }} template: diff --git a/stable/heapster/templates/service.yaml b/stable/heapster/templates/service.yaml index 6dcf31312545..693094dc3c1f 100644 --- a/stable/heapster/templates/service.yaml +++ b/stable/heapster/templates/service.yaml @@ -7,6 +7,9 @@ metadata: app: {{ template "fullname" . 
}} heritage: "{{ .Release.Service }}" release: "{{ .Release.Name }}" + {{- range $key, $value := .Values.service.labels }} + {{ $key }}: {{ $value }} + {{- end }} spec: type: {{ .Values.service.type }} ports: diff --git a/stable/heapster/values.yaml b/stable/heapster/values.yaml index 2cef479eeb98..ab703d7544e8 100644 --- a/stable/heapster/values.yaml +++ b/stable/heapster/values.yaml @@ -5,13 +5,26 @@ image: repository: gcr.io/google_containers/heapster tag: v1.3.0 pullPolicy: IfNotPresent +## Here labels can be added to the heapster deployment +# labels: +# kubernetes.io/cluster-service: "true" +# kubernetes.io/name: "Heapster" +labels: + service: type: ClusterIP externalPort: 8082 internalPort: 8082 ## This allows an overide of the heapster service name - ## Default: {{ template "fullname" . }} + ## Default: {{ .Chart.Name }} # nameOverride: + + ## Here labels can be added to the heapster service + # labels: + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Heapster" + labels: + resources: limits: cpu: 100m diff --git a/stable/influxdb/Chart.yaml b/stable/influxdb/Chart.yaml index 54adffd88075..5a5b798cca47 100755 --- a/stable/influxdb/Chart.yaml +++ b/stable/influxdb/Chart.yaml @@ -1,5 +1,5 @@ name: influxdb -version: 0.4.1 +version: 0.4.2 description: Scalable datastore for metrics, events, and real-time analytics. keywords: - influxdb diff --git a/stable/influxdb/README.md b/stable/influxdb/README.md index e2f1c9061edc..2e13645f6469 100644 --- a/stable/influxdb/README.md +++ b/stable/influxdb/README.md @@ -69,7 +69,7 @@ $ helm install --name my-release -f values.yaml stable/influxdb The [InfluxDB](https://hub.docker.com/_/influxdb/) image stores data in the `/var/lib/influxdb` directory in the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. 
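If dynamic provisioning is not available in the cluster, or a different volume size is needed, the claim can usually be tuned or disabled at install time. A minimal sketch, assuming the chart exposes the usual `persistence.*` values (`enabled`, `storageClass`, `size`):

```bash
# Disable persistence and fall back to an emptyDir (assumed persistence.enabled value)
$ helm install --name my-release --set persistence.enabled=false stable/influxdb

# Or request a specific storage class and size (assumed value names)
$ helm install --name my-release --set persistence.storageClass=standard,persistence.size=16Gi stable/influxdb
```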
## Starting with authentication diff --git a/stable/influxdb/templates/deployment.yaml b/stable/influxdb/templates/deployment.yaml index 25d760f89f78..fdfa2d3fbc8b 100644 --- a/stable/influxdb/templates/deployment.yaml +++ b/stable/influxdb/templates/deployment.yaml @@ -23,23 +23,23 @@ spec: ports: - name: api containerPort: {{ .Values.config.http.bind_address }} - {{- if .Values.config.admin.enabled -}} + {{ if .Values.config.admin.enabled -}} - name: admin containerPort: {{ .Values.config.admin.bind_address }} {{- end }} - {{- if .Values.config.graphite.enabled -}} + {{ if .Values.config.graphite.enabled -}} - name: graphite containerPort: {{ .Values.config.graphite.bind_address }} {{- end }} - {{- if .Values.config.collectd.enabled -}} + {{ if .Values.config.collectd.enabled -}} - name: collectd containerPort: {{ .Values.config.collectd.bind_address }} {{- end }} - {{- if .Values.config.udp.enabled -}} + {{ if .Values.config.udp.enabled -}} - name: udp containerPort: {{ .Values.config.udp.bind_address }} {{- end }} - {{- if .Values.config.opentsdb.enabled -}} + {{ if .Values.config.opentsdb.enabled -}} - name: opentsdb containerPort: {{ .Values.config.opentsdb.bind_address }} {{- end }} diff --git a/stable/jasperreports/Chart.yaml b/stable/jasperreports/Chart.yaml index 49a48c681cc5..37472f93cc15 100644 --- a/stable/jasperreports/Chart.yaml +++ b/stable/jasperreports/Chart.yaml @@ -1,5 +1,6 @@ name: jasperreports -version: 0.1.7 +version: 0.1.9 +appVersion: 6.4.0 description: The JasperReports server can be used as a stand-alone or embedded reporting and BI server that offers web-based reporting, analytic tools and visualization, and a dashboard feature for compiling multiple custom views keywords: - business intelligence @@ -12,7 +13,7 @@ home: http://community.jaspersoft.com/project/jasperreports-server sources: - https://github.com/bitnami/bitnami-docker-jasperreports maintainers: -- name: Bitnami +- name: bitnami-bot email: containers@bitnami.com engine: gotpl icon: https://bitnami.com/assets/stacks/jasperserver/img/jasperserver-stack-110x117.png diff --git a/stable/jasperreports/requirements.lock b/stable/jasperreports/requirements.lock index 213b1f833fbd..c451b392b5f5 100644 --- a/stable/jasperreports/requirements.lock +++ b/stable/jasperreports/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:40.42910615-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:46.714919074-04:00 diff --git a/stable/jasperreports/requirements.yaml b/stable/jasperreports/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/jasperreports/requirements.yaml +++ b/stable/jasperreports/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/jasperreports/values.yaml b/stable/jasperreports/values.yaml index 483c65c34272..21543c56997a 100644 --- a/stable/jasperreports/values.yaml +++ b/stable/jasperreports/values.yaml @@ -1,7 +1,7 @@ ## Bitnami JasperReports image version ## ref: https://hub.docker.com/r/bitnami/jasperreports/tags/ ## -image: bitnami/jasperreports:6.3.0-r11 +image: bitnami/jasperreports:6.4.0-r0 ## Specify a imagePullPolicy ## ref: 
http://kubernetes.io/docs/user-guide/images/#pre-pulling-images diff --git a/stable/jenkins/Chart.yaml b/stable/jenkins/Chart.yaml index ba4657cd1d66..2f89694f426f 100755 --- a/stable/jenkins/Chart.yaml +++ b/stable/jenkins/Chart.yaml @@ -1,7 +1,7 @@ name: jenkins home: https://jenkins.io/ -version: 0.7.4 -appVersion: 2.46.3 +version: 0.8.1 +appVersion: 2.67 description: Open source continuous integration server. It supports multiple SCM tools including CVS, Subversion and Git. It can execute Apache Ant and Apache Maven-based projects as well as arbitrary scripts. sources: - https://github.com/jenkinsci/jenkins diff --git a/stable/jenkins/README.md b/stable/jenkins/README.md index f27e18ec098f..d6d414ddd873 100644 --- a/stable/jenkins/README.md +++ b/stable/jenkins/README.md @@ -26,40 +26,46 @@ The following tables lists the configurable parameters of the Jenkins chart and ### Jenkins Master - -| Parameter | Description | Default | -| --------------------------------- | ----------------------------------- | ---------------------------------------------------------------------------- | -| `Master.Name` | Jenkins master name | `jenkins-master` | -| `Master.Image` | Master image name | `jenkinsci/jenkins` | -| `Master.ImageTag` | Master image tag | `2.46.1` | -| `Master.ImagePullPolicy` | Master image pull policy | `Always` | -| `Master.Component` | k8s selector key | `jenkins-master` | -| `Master.Cpu` | Master requested cpu | `200m` | -| `Master.Memory` | Master requested memory | `256Mi` | -| `Master.ServiceType` | k8s service type | `LoadBalancer` | -| `Master.ServicePort` | k8s service port | `8080` | -| `Master.NodePort` | k8s node port | Not set | -| `Master.ContainerPort` | Master listening port | `8080` | -| `Master.SlaveListenerPort` | Listening port for agents | `50000` | -| `Master.LoadBalancerSourceRanges` | Allowed inbound IP addresses | `0.0.0.0/0` | -| `Master.JMXPort` | Open a port, for JMX stats | Not set | -| `Master.CustomConfigMap` | Use a custom ConfigMap | `false` | -| `Master.Ingress.Annotations` | Ingress annotations | `{}` | -| `Master.Ingress.TLS` | Ingress TLS configuration | `[]` | -| `Master.InitScripts` | List of Jenkins init scripts | Not set | -| `Master.InstallPlugins` | List of Jenkins plugins to install | `kubernetes:0.11 workflow-aggregator:2.5 credentials-binding:1.11 git:3.2.0` | -| `Master.ScriptApproval` | List of groovy functions to approve | Not set | +| Parameter | Description | Default | +| --------------------------------- | ------------------------------------ | ---------------------------------------------------------------------------- | +| `Master.Name` | Jenkins master name | `jenkins-master` | +| `Master.Image` | Master image name | `jenkinsci/jenkins` | +| `Master.ImageTag` | Master image tag | `2.46.1` | +| `Master.ImagePullPolicy` | Master image pull policy | `Always` | +| `Master.Component` | k8s selector key | `jenkins-master` | +| `Master.Cpu` | Master requested cpu | `200m` | +| `Master.Memory` | Master requested memory | `256Mi` | +| `Master.ServiceType` | k8s service type | `LoadBalancer` | +| `Master.ServicePort` | k8s service port | `8080` | +| `Master.NodePort` | k8s node port | Not set | +| `Master.ContainerPort` | Master listening port | `8080` | +| `Master.SlaveListenerPort` | Listening port for agents | `50000` | +| `Master.LoadBalancerSourceRanges` | Allowed inbound IP addresses | `0.0.0.0/0` | +| `Master.JMXPort` | Open a port, for JMX stats | Not set | +| `Master.CustomConfigMap` | Use a custom ConfigMap | `false` | +| 
`Master.Ingress.Annotations` | Ingress annotations | `{}` | +| `Master.Ingress.TLS` | Ingress TLS configuration | `[]` | +| `Master.InitScripts` | List of Jenkins init scripts | Not set | +| `Master.InstallPlugins` | List of Jenkins plugins to install | `kubernetes:0.11 workflow-aggregator:2.5 credentials-binding:1.11 git:3.2.0` | +| `Master.ScriptApproval` | List of groovy functions to approve | Not set | +| `Master.NodeSelector` | Node labels for pod assignment | `{}` | +| `Master.Tolerations` | Toleration labels for pod assignment | `{}` | +| `rbac.install` | Create service account and ClusterRoleBinding for Kubernetes plugin | `false` | +| `rbac.apiVersion` | RBAC API version | `v1beta1` | +| `rbac.roleRef` | Cluster role name to bind to | `cluster-admin` | ### Jenkins Agent | Parameter | Description | Default | | ----------------------- | ----------------------------------------------- | ---------------------- | +| `Agent.AlwaysPullImage` | Always pull agent container image before build | `false` | | `Agent.Enabled` | Enable Kubernetes plugin jnlp-agent podTemplate | `true` | | `Agent.Image` | Agent image name | `jenkinsci/jnlp-slave` | | `Agent.ImageTag` | Agent image tag | `2.62` | | `Agent.Privileged` | Agent privileged container | `false` | | `Agent.Cpu` | Agent requested cpu | `200m` | | `Agent.Memory` | Agent requested memory | `256Mi` | +| `Agent.volumes` | Additional volumes | `nil` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. @@ -71,6 +77,20 @@ $ helm install --name my-release -f values.yaml stable/jenkins > **Tip**: You can use the default [values.yaml](values.yaml) +## Mounting volumes into your Agent pods + +Your Jenkins Agents will run as pods, and it's possible to inject volumes where needed: + +```yaml +Agent: + volumes: + - type: Secret + secretName: jenkins-mysecrets + mountPath: /var/run/secrets/jenkins-mysecrets +``` + +The suported volume types are: `ConfigMap`, `EmptyDir`, `HostPath`, `Nfs`, `Pod`, `Secret`. Each type supports a different set of configurable attributes, defined by [the corresponding Java class](https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/volumes). + ## NetworkPolicy To make use of the NetworkPolicy resources created by default, @@ -123,3 +143,11 @@ jenkins: Master: CustomConfigMap: true ``` + +## RBAC + +If running upon a cluster with RBAC enabled you will need to do the following: + +* `helm install stable/jenkins --set rbac.install=true` +* Create a Jenkins credential of type Kubernetes service account with service account name provided in the `helm status` output. +* Under configure Jenkins -- Update the credentials config in the cloud section to use the service account credential you created in the step above. diff --git a/stable/jenkins/templates/NOTES.txt b/stable/jenkins/templates/NOTES.txt index f93a7f88b374..cc59ba14d8bd 100644 --- a/stable/jenkins/templates/NOTES.txt +++ b/stable/jenkins/templates/NOTES.txt @@ -14,7 +14,7 @@ {{- else if contains "LoadBalancer" .Values.Master.ServiceType }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export SERVICE_IP=$(kubectl get svc {{ template "fullname" . 
}} --namespace {{ .Release.Namespace }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") echo http://$SERVICE_IP:{{ .Values.Master.ServicePort }}/login {{- else if contains "ClusterIP" .Values.Master.ServiceType }} @@ -37,3 +37,9 @@ https://cloud.google.com/solutions/jenkins-on-container-engine ###### the Jenkins pod is terminated. ##### ################################################################################# {{- end }} + +{{- if .Values.rbac.install }} +Configure the Kubernetes plugin in Jenkins to use the following Service Account name {{ template "fullname" . }} using the following steps: + Create a Jenkins credential of type Kubernetes service account with service account name {{ template "fullname" . }} + Under configure Jenkins -- Update the credentials config in the cloud section to use the service account credential you created in the step above. +{{- end }} diff --git a/stable/jenkins/templates/config.yaml b/stable/jenkins/templates/config.yaml index 667bdcdab84b..dde70bd7b64b 100644 --- a/stable/jenkins/templates/config.yaml +++ b/stable/jenkins/templates/config.yaml @@ -9,7 +9,7 @@ data: - {{.Values.Master.ImageTag}} + {{ .Values.Master.ImageTag }} 0 NORMAL {{ .Values.Master.UseSecurity }} @@ -35,9 +35,17 @@ data: default 2147483647 0 - + - + +{{- range $index, $volume := .Values.Agent.volumes }} + +{{- range $key, $value := $volume }}{{- if not (eq $key "type") }} + <{{ $key }}>{{ $value }} +{{- end }}{{- end }} + +{{- end }} + jnlp @@ -47,15 +55,15 @@ data: {{- else }} false {{- end }} - false + {{ .Values.Agent.AlwaysPullImage }} /home/jenkins ${computer.jnlpmac} ${computer.name} false - {{.Values.Agent.Cpu}} - {{.Values.Agent.Memory}} - {{.Values.Agent.Cpu}} - {{.Values.Agent.Memory}} + {{ .Values.Agent.Cpu }} + {{ .Values.Agent.Memory }} + {{ .Values.Agent.Cpu }} + {{ .Values.Agent.Memory }} @@ -69,8 +77,8 @@ data: https://kubernetes.default false {{ .Release.Namespace }} - http://{{ template "fullname" . }}:{{.Values.Master.ServicePort}} - {{ template "fullname" . }}:50000 + http://{{ template "fullname" . }}:{{ .Values.Master.ServicePort }} + {{ template "fullname" . }}-agent:50000 10 5 0 @@ -129,7 +137,7 @@ data: {{- end }} {{- range $key, $val := .Values.Master.InitScripts }} init{{ $key }}.groovy: |- -{{ $val | indent 4}} +{{ $val | indent 4 }} {{- end }} plugins.txt: |- {{- if .Values.Master.InstallPlugins }} diff --git a/stable/jenkins/templates/jenkins-agent-svc.yaml b/stable/jenkins/templates/jenkins-agent-svc.yaml new file mode 100644 index 000000000000..193bf8d5fca5 --- /dev/null +++ b/stable/jenkins/templates/jenkins-agent-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }}-agent + labels: + app: {{ template "fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" +spec: + ports: + - port: {{ .Values.Master.SlaveListenerPort }} + targetPort: {{ .Values.Master.SlaveListenerPort }} + name: slavelistener + selector: + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" + type: ClusterIP diff --git a/stable/jenkins/templates/jenkins-master-deployment.yaml b/stable/jenkins/templates/jenkins-master-deployment.yaml index 543530e1fdc9..59b26e60854f 100644 --- a/stable/jenkins/templates/jenkins-master-deployment.yaml +++ b/stable/jenkins/templates/jenkins-master-deployment.yaml @@ -3,32 +3,32 @@ kind: Deployment metadata: name: {{ template "fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Name}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Name }}" spec: replicas: 1 strategy: type: RollingUpdate selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" template: metadata: labels: app: {{ template "fullname" . }} - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" annotations: checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} pod.alpha.kubernetes.io/init-containers: '[ { "name": "copy-default-config", - "image": "{{.Values.Master.Image}}:{{.Values.Master.ImageTag}}", - "imagePullPolicy": "{{.Values.Master.ImagePullPolicy}}", + "image": "{{ .Values.Master.Image }}:{{ .Values.Master.ImageTag }}", + "imagePullPolicy": "{{ .Values.Master.ImagePullPolicy }}", "command": [ "sh", "/var/jenkins_config/apply_config.sh" ], "volumeMounts": [ { @@ -51,18 +51,26 @@ spec: } ]' spec: + {{- if .Values.Master.NodeSelector }} + nodeSelector: +{{ toYaml .Values.Master.NodeSelector | indent 8 }} + {{- end }} + {{- if .Values.Master.Tolerations }} + tolerations: +{{ toYaml .Values.Master.Tolerations | indent 8 }} + {{- end }} securityContext: runAsUser: 0 containers: - name: {{ template "fullname" . 
}} - image: "{{.Values.Master.Image}}:{{.Values.Master.ImageTag}}" - imagePullPolicy: "{{.Values.Master.ImagePullPolicy}}" + image: "{{ .Values.Master.Image }}:{{ .Values.Master.ImageTag }}" + imagePullPolicy: "{{ .Values.Master.ImagePullPolicy }}" {{- if .Values.Master.UseSecurity }} args: [ "--argumentsRealm.passwd.$(ADMIN_USER)=$(ADMIN_PASSWORD)", "--argumentsRealm.roles.$(ADMIN_USER)=admin"] {{- end }} env: - name: JAVA_OPTS - value: "{{ default "" .Values.Master.JavaOpts}}" + value: "{{ default "" .Values.Master.JavaOpts }}" {{- if .Values.Master.UseSecurity }} - name: ADMIN_PASSWORD valueFrom: @@ -76,9 +84,9 @@ spec: key: jenkins-admin-user {{- end }} ports: - - containerPort: {{.Values.Master.ContainerPort}} + - containerPort: {{ .Values.Master.ContainerPort }} name: http - - containerPort: {{.Values.Master.SlaveListenerPort}} + - containerPort: {{ .Values.Master.SlaveListenerPort }} name: slavelistener {{- if .Values.Master.JMXPort }} - containerPort: {{ .Values.Master.JMXPort }} @@ -86,8 +94,8 @@ spec: {{- end }} resources: requests: - cpu: "{{.Values.Master.Cpu}}" - memory: "{{.Values.Master.Memory}}" + cpu: "{{ .Values.Master.Cpu }}" + memory: "{{ .Values.Master.Memory }}" volumeMounts: {{- if .Values.Persistence.mounts }} {{ toYaml .Values.Persistence.mounts | indent 12 }} diff --git a/stable/jenkins/templates/jenkins-master-networkpolicy.yaml b/stable/jenkins/templates/jenkins-master-networkpolicy.yaml index 21b9fc5026ce..6034c919f6a3 100644 --- a/stable/jenkins/templates/jenkins-master-networkpolicy.yaml +++ b/stable/jenkins/templates/jenkins-master-networkpolicy.yaml @@ -2,32 +2,32 @@ kind: NetworkPolicy apiVersion: {{ .Values.NetworkPolicy.ApiVersion }} metadata: - name: "{{.Release.Name}}-{{.Values.Master.Component}}" + name: "{{ .Release.Name }}-{{ .Values.Master.Component }}" spec: podSelector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" ingress: # Allow web access to the UI - ports: - - port: {{.Values.Master.ContainerPort}} + - port: {{ .Values.Master.ContainerPort }} # Allow inbound connections from slave - from: - podSelector: matchLabels: - "jenkins/{{.Release.Name}}-{{.Values.Agent.Component}}": "true" + "jenkins/{{ .Release.Name }}-{{ .Values.Agent.Component }}": "true" ports: - - port: {{.Values.Master.SlaveListenerPort}} + - port: {{ .Values.Master.SlaveListenerPort }} {{- if .Values.Agent.Enabled }} --- kind: NetworkPolicy apiVersion: {{ .Values.NetworkPolicy.ApiVersion }} metadata: - name: "{{.Release.Name}}-{{.Values.Agent.Component}}" + name: "{{ .Release.Name }}-{{ .Values.Agent.Component }}" spec: podSelector: matchLabels: # DefaultDeny - "jenkins/{{.Release.Name}}-{{.Values.Agent.Component}}": "true" + "jenkins/{{ .Release.Name }}-{{ .Values.Agent.Component }}": "true" {{- end }} {{- end }} diff --git a/stable/jenkins/templates/jenkins-master-svc.yaml b/stable/jenkins/templates/jenkins-master-svc.yaml index 20b45b749844..e30b42552c4b 100644 --- a/stable/jenkins/templates/jenkins-master-svc.yaml +++ b/stable/jenkins/templates/jenkins-master-svc.yaml @@ -8,6 +8,10 @@ metadata: release: {{.Release.Name | quote }} chart: "{{.Chart.Name}}-{{.Chart.Version}}" component: "{{.Release.Name}}-{{.Values.Master.Component}}" +{{- if .Values.Master.ServiceAnnotations }} + annotations: +{{ toYaml .Values.Master.ServiceAnnotations | indent 4 }} +{{- end }} spec: ports: - port: {{.Values.Master.ServicePort}} @@ -16,9 +20,6 @@ spec: {{if (and (eq .Values.Master.ServiceType 
"NodePort") (not (empty .Values.Master.NodePort)))}} nodePort: {{.Values.Master.NodePort}} {{end}} - - port: {{.Values.Master.SlaveListenerPort}} - targetPort: {{.Values.Master.SlaveListenerPort}} - name: slavelistener selector: component: "{{.Release.Name}}-{{.Values.Master.Component}}" type: {{.Values.Master.ServiceType}} diff --git a/stable/jenkins/templates/jenkins-test.yaml b/stable/jenkins/templates/jenkins-test.yaml index 2722f9d06240..34e9aadff67e 100644 --- a/stable/jenkins/templates/jenkins-test.yaml +++ b/stable/jenkins/templates/jenkins-test.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Pod metadata: - name: "{{.Release.Name}}-ui-test-{{ randAlphaNum 5 | lower }}" + name: "{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}" annotations: "helm.sh/hook": test-success "pod.beta.kubernetes.io/init-containers": '[ @@ -19,9 +19,17 @@ metadata: } ]' spec: + {{- if .Values.Master.NodeSelector }} + nodeSelector: +{{ toYaml .Values.Master.NodeSelector | indent 4 }} + {{- end }} + {{- if .Values.Master.Tolerations }} + tolerations: +{{ toYaml .Values.Master.Tolerations | indent 4 }} + {{- end }} containers: - - name: {{.Release.Name}}-ui-test - image: {{.Values.Master.Image}}:{{.Values.Master.ImageTag}} + - name: {{ .Release.Name }}-ui-test + image: {{ .Values.Master.Image }}:{{ .Values.Master.ImageTag }} command: ["/tools/bats/bats", "-t", "/tests/run.sh"] volumeMounts: - mountPath: /tests diff --git a/stable/jenkins/templates/rbac.yaml b/stable/jenkins/templates/rbac.yaml new file mode 100644 index 000000000000..1e1472dfae87 --- /dev/null +++ b/stable/jenkins/templates/rbac.yaml @@ -0,0 +1,20 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . -}} +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +kind: ClusterRoleBinding +metadata: + name: {{ $serviceName }}-role-binding + labels: + app: {{ $serviceName }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.rbac.roleRef }} +subjects: +- kind: ServiceAccount + name: {{ $serviceName }} + namespace: {{ .Release.Namespace }} +{{ end }} \ No newline at end of file diff --git a/stable/jenkins/templates/service-account.yaml b/stable/jenkins/templates/service-account.yaml new file mode 100644 index 000000000000..b1697504c644 --- /dev/null +++ b/stable/jenkins/templates/service-account.yaml @@ -0,0 +1,12 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . 
-}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ $serviceName }} + labels: + app: {{ $serviceName }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{ end }} \ No newline at end of file diff --git a/stable/jenkins/templates/test-config.yaml b/stable/jenkins/templates/test-config.yaml index 4723003e0f90..db4f35a5caaa 100644 --- a/stable/jenkins/templates/test-config.yaml +++ b/stable/jenkins/templates/test-config.yaml @@ -5,5 +5,5 @@ metadata: data: run.sh: |- @test "Testing Jenkins UI is accessible" { - curl --retry 12 --retry-delay 10 {{.Release.Name}}-jenkins:8080/login - } \ No newline at end of file + curl --retry 12 --retry-delay 10 {{ .Release.Name }}-jenkins:8080/login + } diff --git a/stable/jenkins/values.yaml b/stable/jenkins/values.yaml index afc61a0d1614..fd90892f1491 100644 --- a/stable/jenkins/values.yaml +++ b/stable/jenkins/values.yaml @@ -6,7 +6,7 @@ Master: Name: jenkins-master Image: "jenkinsci/jenkins" - ImageTag: "2.46.3" + ImageTag: "2.67" ImagePullPolicy: "Always" Component: "jenkins-master" UseSecurity: true @@ -20,6 +20,9 @@ Master: # For minikube, set this to NodePort, elsewhere use LoadBalancer # Use ClusterIP if your setup includes ingress controller ServiceType: LoadBalancer +# Master Service annotations + ServiceAnnotations: {} + # service.beta.kubernetes.io/aws-load-balancer-backend-protocol: https # Used to create Ingress record (should used with ServiceType: ClusterIP) # HostName: jenkins.cluster.local # NodePort: **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/stable/linkerd/templates/config.yaml b/stable/linkerd/templates/config.yaml index c8751daf1c51..f82483c61d60 100644 --- a/stable/linkerd/templates/config.yaml +++ b/stable/linkerd/templates/config.yaml @@ -34,4 +34,4 @@ data: servers: - ip: 0.0.0.0 port: 4140 -{{ end}} +{{ end }} diff --git a/stable/linkerd/templates/daemonset.yaml b/stable/linkerd/templates/daemonset.yaml index 75df5a5d81dd..2a8dc0ddcbc0 100644 --- a/stable/linkerd/templates/daemonset.yaml +++ b/stable/linkerd/templates/daemonset.yaml @@ -24,7 +24,7 @@ spec: name: "{{ template "fullname" . }}-config" containers: - name: {{ .Values.linkerd.name }} - image: {{ .Values.linkerd.image}} + image: {{ .Values.linkerd.image }} env: - name: POD_IP valueFrom: @@ -55,7 +55,7 @@ spec: mountPath: "/io.buoyant/linkerd/config" readOnly: true - name: kubectl - image: {{ .Values.kubectl.image}} + image: {{ .Values.kubectl.image }} args: - "proxy" - "-p" diff --git a/stable/locust/templates/NOTES.txt b/stable/locust/templates/NOTES.txt index b090947187b6..10a41ddb5d61 100644 --- a/stable/locust/templates/NOTES.txt +++ b/stable/locust/templates/NOTES.txt @@ -10,7 +10,7 @@ Get the Locust URL to visit by running these commands in the same shell: {{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl get svc -n {{ .Release.Namespace }} -w {{ template "locust.master-svc" . }}' - export SERVICE_IP=$(kubectl get svc -n {{ .Release.Namespace }} {{ template "locust.master-svc" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "locust.master-svc" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") export LOCUST_URL=http://$SERVICE_IP:{{ .Values.service.externalPort }}/ {{- else if contains "ClusterIP" .Values.service.type }} diff --git a/stable/locust/templates/master-deploy.yaml b/stable/locust/templates/master-deploy.yaml index 0635f8bb9b05..bf39be6ea0c6 100644 --- a/stable/locust/templates/master-deploy.yaml +++ b/stable/locust/templates/master-deploy.yaml @@ -6,15 +6,15 @@ metadata: app: {{ template "locust.fullname" . }} heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" component: master spec: replicas: 1 strategy: type: RollingUpdate rollingUpdate: - maxSurge: {{default 1 .Values.master.maxSurge}} - maxUnavailable: {{default 1 .Values.master.maxUnavailable}} + maxSurge: {{ default 1 .Values.master.maxSurge }} + maxUnavailable: {{ default 1 .Values.master.maxUnavailable }} template: metadata: labels: diff --git a/stable/locust/templates/master-svc.yaml b/stable/locust/templates/master-svc.yaml index 0e30a4f5a9e1..94387662bf3a 100644 --- a/stable/locust/templates/master-svc.yaml +++ b/stable/locust/templates/master-svc.yaml @@ -5,7 +5,7 @@ metadata: labels: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" app: {{ template "locust.fullname" . }} component: "master" spec: @@ -14,7 +14,7 @@ spec: - name: {{ .Values.service.name }} port: {{ .Values.service.externalPort }} targetPort: {{ .Values.service.internalPort }} - nodePort: {{ .Values.service.nodePort}} + nodePort: {{ .Values.service.nodePort }} protocol: TCP - name: master-p1 port: 5557 diff --git a/stable/locust/templates/worker-deploy.yaml b/stable/locust/templates/worker-deploy.yaml index ba74ec0f5c84..f857e929b0f5 100644 --- a/stable/locust/templates/worker-deploy.yaml +++ b/stable/locust/templates/worker-deploy.yaml @@ -5,16 +5,16 @@ metadata: labels: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" app: {{ template "locust.fullname" . }} component: worker spec: - replicas: {{default 2 .Values.worker.replicaCount}} + replicas: {{ default 2 .Values.worker.replicaCount }} strategy: type: RollingUpdate rollingUpdate: - maxSurge: {{default 1 .Values.worker.maxSurge}} - maxUnavailable: {{default 1 .Values.worker.maxUnavailable}} + maxSurge: {{ default 1 .Values.worker.maxSurge }} + maxUnavailable: {{ default 1 .Values.worker.maxUnavailable }} template: metadata: labels: diff --git a/stable/magento/Chart.yaml b/stable/magento/Chart.yaml index a4a9b8a12b83..99b0e54a2472 100644 --- a/stable/magento/Chart.yaml +++ b/stable/magento/Chart.yaml @@ -1,5 +1,5 @@ name: magento -version: 0.4.8 +version: 0.4.9 appVersion: 2.1.7 description: A feature-rich flexible e-commerce solution. It includes transaction options, multi-store functionality, loyalty programs, product categorization and shopper filtering, promotion rules, and more. 
keywords: diff --git a/stable/magento/requirements.lock b/stable/magento/requirements.lock index 27011bda5530..c6ee67b98a1c 100644 --- a/stable/magento/requirements.lock +++ b/stable/magento/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:41.14070852-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:47.593304157-04:00 diff --git a/stable/magento/requirements.yaml b/stable/magento/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/magento/requirements.yaml +++ b/stable/magento/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/magento/templates/NOTES.txt b/stable/magento/templates/NOTES.txt index dc1e202b922b..8afc2574d9e5 100644 --- a/stable/magento/templates/NOTES.txt +++ b/stable/magento/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure Magento with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.magento-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/mailhog/templates/service.yaml b/stable/mailhog/templates/service.yaml index faeea969fc24..33d49c4a1e09 100644 --- a/stable/mailhog/templates/service.yaml +++ b/stable/mailhog/templates/service.yaml @@ -3,7 +3,7 @@ kind: Service metadata: {{- if .Values.service.annotations }} annotations: -{{ toYaml .Values.service.annotations | indent 4}} +{{ toYaml .Values.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "name" . }} diff --git a/stable/mariadb/README.md b/stable/mariadb/README.md index 50b8bafe63a6..7e7044445100 100644 --- a/stable/mariadb/README.md +++ b/stable/mariadb/README.md @@ -107,7 +107,7 @@ helm install --name my-release -f mariadb-values.yaml stable/mariadb The [Bitnami MariaDB](https://github.com/bitnami/bitnami-docker-mariadb) image stores the MariaDB data and configurations at the `/bitnami/mariadb` path of the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning, by default. An existing PersistentVolumeClaim can be defined. +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning, by default. 
An existing PersistentVolumeClaim can be defined. ### Existing PersistentVolumeClaims diff --git a/stable/mediawiki/Chart.yaml b/stable/mediawiki/Chart.yaml index 60f019de6104..7b1799acebc9 100644 --- a/stable/mediawiki/Chart.yaml +++ b/stable/mediawiki/Chart.yaml @@ -1,5 +1,5 @@ name: mediawiki -version: 0.4.10 +version: 0.4.11 appVersion: 1.28.2 description: Extremely powerful, scalable software and a feature-rich wiki implementation that uses PHP to process and display data stored in a database. keywords: diff --git a/stable/mediawiki/requirements.lock b/stable/mediawiki/requirements.lock index 80d783caf639..904f41c303ba 100644 --- a/stable/mediawiki/requirements.lock +++ b/stable/mediawiki/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:41.48914304-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:48.003872486-04:00 diff --git a/stable/mediawiki/requirements.yaml b/stable/mediawiki/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/mediawiki/requirements.yaml +++ b/stable/mediawiki/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/minecraft/templates/deployment.yaml b/stable/minecraft/templates/deployment.yaml index 3e72a84783da..40e398576a44 100644 --- a/stable/minecraft/templates/deployment.yaml +++ b/stable/minecraft/templates/deployment.yaml @@ -98,7 +98,7 @@ spec: - name: rcon containerPort: {{ .Values.minecraftServer.rcon.port }} protocol: TCP - {{- end}} + {{- end }} volumeMounts: - name: datadir mountPath: /data @@ -106,7 +106,7 @@ spec: - name: datadir {{- if .Values.persistence.dataDir.enabled }} persistentVolumeClaim: - claimName: {{template "fullname" .}}-datadir + claimName: {{ template "fullname" . }}-datadir {{- else }} emptyDir: {} {{- end }} diff --git a/stable/minio/templates/minio_statefulset.yaml b/stable/minio/templates/minio_statefulset.yaml index 43835d4f7d0c..66e11aea9bcb 100644 --- a/stable/minio/templates/minio_statefulset.yaml +++ b/stable/minio/templates/minio_statefulset.yaml @@ -67,7 +67,7 @@ spec: args: - server {{- range $i := until $nodeCount }} - - http://{{ template "fullname" $ }}-{{$i}}.{{ template "fullname" $ }}.{{ $.Release.Namespace }}.svc.cluster.local{{ $.Values.mountPath }} + - http://{{ template "fullname" $ }}-{{ $i }}.{{ template "fullname" $ }}.{{ $.Release.Namespace }}.svc.cluster.local{{ $.Values.mountPath }} {{- end }} {{- end }} volumeMounts: diff --git a/stable/mongodb-replicaset/README.md b/stable/mongodb-replicaset/README.md index d8950f926d83..b8141efe5497 100644 --- a/stable/mongodb-replicaset/README.md +++ b/stable/mongodb-replicaset/README.md @@ -20,8 +20,8 @@ using Kubernetes StatefulSets and Init Containers. 
To install the chart with the release name `my-release`: ```console -$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator -$ helm install --name my-release incubator/mongodb-replicaset +$ helm repo add stable https://kubernetes-charts.storage.googleapis.com/ +$ helm install --name my-release stable/mongodb-replicaset ``` ## Configuration @@ -101,17 +101,18 @@ export RELEASE_NAME=messy-hydra ### Cluster Health ```console -$ for i in 0 1 2; do kubectl exec $RELEASE_NAME-mongodb-$i -- sh -c 'mongo --eval="printjson(db.serverStatus())"'; done +$ for i in 0 1 2; do kubectl exec $RELEASE_NAME-mongodb-replicaset-$i -- sh -c 'mongo --eval="printjson(db.serverStatus())"'; done ``` ### Failover One can check the roles being played by each node by using the following: ```console -$ for i in 0 1 2; do kubectl exec $RELEASE_NAME-mongodb-$i -- sh -c 'mongo --eval="printjson(rs.isMaster())"'; done +$ for i in 0 1 2; do kubectl exec $RELEASE_NAME-mongodb-replicaset-$i -- sh -c 'mongo --eval="printjson(rs.isMaster())"'; done -MongoDB shell version: 3.2.9 -connecting to: test +MongoDB shell version: 3.4.5 +connecting to: mongodb://127.0.0.1:27017 +MongoDB server version: 3.4.5 { "hosts" : [ "messy-hydra-mongodb-0.messy-hydra-mongodb.default.svc.cluster.local:27017", @@ -140,16 +141,16 @@ This lets us see which member is primary. Let us now test persistence and failover. First, we insert a key (in the below example, we assume pod 0 is the master): ```console -$ kubectl exec $RELEASE_NAME-mongodb-0 -- mongo --eval="printjson(db.test.insert({key1: 'value1'}))" +$ kubectl exec $RELEASE_NAME-mongodb-replicaset-0 -- mongo --eval="printjson(db.test.insert({key1: 'value1'}))" -MongoDB shell version: 3.2.8 -connecting to: test +MongoDB shell version: 3.4.5 +connecting to: mongodb://127.0.0.1:27017 { "nInserted" : 1 } ``` Watch existing members: ```console -$ kubectl run --attach bbox --image=mongo:3.2 --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo <$release-podname-$i> $(mongo --host=$RELEASE_NAME-mongodb-$i.$RELEASE_NAME-mongodb --eval="printjson(rs.isMaster())" | grep primary); sleep 1; done; done'; +$ kubectl run --attach bbox --image=mongo:3.4 --restart=Never --env="RELEASE_NAME=$RELEASE_NAME" -- sh -c 'while true; do for i in 0 1 2; do echo $RELEASE_NAME-mongodb-replicaset-$i $(mongo --host=$RELEASE_NAME-mongodb-replicaset-$i.$RELEASE_NAME-mongodb-replicaset --eval="printjson(rs.isMaster())" | grep primary); sleep 1; done; done'; Waiting for pod default/bbox2 to be running, status is Pending, pod ready: false If you don't see a command prompt, try pressing enter. @@ -163,14 +164,14 @@ messy-hydra-mongodb-0 "primary" : "messy-hydra-mongodb-0.messy-hydra-mongodb.def Kill the primary and watch as a new master getting elected. ```console -$ kubectl delete pod $RELEASE_NAME-mongodb-0 +$ kubectl delete pod $RELEASE_NAME-mongodb-replicaset-0 pod "messy-hydra-mongodb-0" deleted ``` Delete all pods and let the statefulset controller bring it up. 
```console -$ kubectl delete po -l app=mongodb +$ kubectl delete po -l "app=mongodb-replicaset,release=$RELEASE_NAME" $ kubectl get po --watch-only NAME READY STATUS RESTARTS AGE messy-hydra-mongodb-0 0/1 Pending 0 0s @@ -208,10 +209,10 @@ messy-hydra-mongodb-2 "primary" : "messy-hydra-mongodb-0.messy-hydra-mongodb.def Check the previously inserted key: ```console -$ kubectl exec $RELEASE_NAME-mongodb-1 -- mongo --eval="rs.slaveOk(); db.test.find({key1:{\$exists:true}}).forEach(printjson)" +$ kubectl exec $RELEASE_NAME-mongodb-replicaset-1 -- mongo --eval="rs.slaveOk(); db.test.find({key1:{\$exists:true}}).forEach(printjson)" -MongoDB shell version: 3.2.8 -connecting to: test +MongoDB shell version: 3.4.5 +connecting to: mongodb://127.0.0.1:27017 { "_id" : ObjectId("57b180b1a7311d08f2bfb617"), "key1" : "value1" } ``` diff --git a/stable/mongodb-replicaset/templates/mongodb-statefulset.yaml b/stable/mongodb-replicaset/templates/mongodb-statefulset.yaml index e7832d74dd71..f5f26ce16149 100644 --- a/stable/mongodb-replicaset/templates/mongodb-statefulset.yaml +++ b/stable/mongodb-replicaset/templates/mongodb-statefulset.yaml @@ -17,7 +17,7 @@ spec: release: {{ .Release.Name }} annotations: {{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | indent 8}} +{{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} pod.alpha.kubernetes.io/init-containers: '[ { diff --git a/stable/mongodb/README.md b/stable/mongodb/README.md index 8de6c0baf465..350d12070b18 100644 --- a/stable/mongodb/README.md +++ b/stable/mongodb/README.md @@ -80,4 +80,4 @@ $ helm install --name my-release -f values.yaml stable/mongodb The [Bitnami MongoDB](https://github.com/bitnami/bitnami-docker-mongodb) image stores the MongoDB data and configurations at the `/bitnami/mongodb` path of the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. 
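The MongoDB README hunk above describes persistence only in prose: the chart mounts a Persistent Volume at `/bitnami/mongodb`, created through dynamic volume provisioning. As a rough sketch of how that default might be overridden with a pre-created claim, a values override could look like the snippet below; the `persistence.*` key names follow common Bitnami chart conventions and are an assumption, not something this diff confirms.

```yaml
# Hypothetical values override for stable/mongodb (key names assumed, not taken from this diff)
persistence:
  enabled: true
  # Point the chart at an existing PVC instead of relying on dynamic provisioning
  existingClaim: my-mongodb-data
```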
diff --git a/stable/moodle/Chart.yaml b/stable/moodle/Chart.yaml index a4074cfafc13..575629f3e9c8 100644 --- a/stable/moodle/Chart.yaml +++ b/stable/moodle/Chart.yaml @@ -1,5 +1,5 @@ name: moodle -version: 0.1.8 +version: 0.1.9 description: Moodle is a learning platform designed to provide educators, administrators and learners with a single robust, secure and integrated system to create personalised learning environments keywords: - moodle diff --git a/stable/moodle/requirements.lock b/stable/moodle/requirements.lock index d0297a67f9cc..55d1ea5364e3 100644 --- a/stable/moodle/requirements.lock +++ b/stable/moodle/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:41.82654147-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:48.423522241-04:00 diff --git a/stable/moodle/requirements.yaml b/stable/moodle/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/moodle/requirements.yaml +++ b/stable/moodle/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/nginx-ingress/templates/controller-daemonset.yaml b/stable/nginx-ingress/templates/controller-daemonset.yaml index bb93115f854d..0363253ea4ab 100644 --- a/stable/nginx-ingress/templates/controller-daemonset.yaml +++ b/stable/nginx-ingress/templates/controller-daemonset.yaml @@ -14,7 +14,7 @@ spec: metadata: {{- if .Values.controller.podAnnotations }} annotations: -{{ toYaml .Values.controller.podAnnotations | indent 8}} +{{ toYaml .Values.controller.podAnnotations | indent 8 }} {{- end }} labels: app: {{ template "name" . }} @@ -31,7 +31,7 @@ spec: - --default-backend-service={{ if .Values.defaultBackend.enabled }}{{ .Release.Namespace }}/{{ template "defaultBackend.fullname" . }}{{ else }}{{ .Values.controller.defaultBackendService }}{{ end }} {{- if and (contains "0.9" .Values.controller.image.tag) .Values.controller.publishService.enabled }} - --publish-service={{ template "controller.publishServicePath" . }} - {{- end}} + {{- end }} {{- if (contains "0.9" .Values.controller.image.tag) }} - --configmap={{ .Release.Namespace }}/{{ template "controller.fullname" . }} {{- else }} diff --git a/stable/nginx-ingress/templates/controller-deployment.yaml b/stable/nginx-ingress/templates/controller-deployment.yaml index f7cce2e99562..43af40606d28 100644 --- a/stable/nginx-ingress/templates/controller-deployment.yaml +++ b/stable/nginx-ingress/templates/controller-deployment.yaml @@ -15,7 +15,7 @@ spec: metadata: {{- if .Values.controller.podAnnotations }} annotations: -{{ toYaml .Values.controller.podAnnotations | indent 8}} +{{ toYaml .Values.controller.podAnnotations | indent 8 }} {{- end }} labels: app: {{ template "name" . }} @@ -32,7 +32,7 @@ spec: - --default-backend-service={{ if .Values.defaultBackend.enabled }}{{ .Release.Namespace }}/{{ template "defaultBackend.fullname" . }}{{ else }}{{ .Values.controller.defaultBackendService }}{{ end }} {{- if and (contains "0.9" .Values.controller.image.tag) .Values.controller.publishService.enabled }} - --publish-service={{ template "controller.publishServicePath" . 
}} - {{- end}} + {{- end }} {{- if (contains "0.9" .Values.controller.image.tag) }} - --configmap={{ .Release.Namespace }}/{{ template "controller.fullname" . }} {{- else }} diff --git a/stable/nginx-ingress/templates/controller-service.yaml b/stable/nginx-ingress/templates/controller-service.yaml index 7eeb95ab249f..25b4fb86f316 100644 --- a/stable/nginx-ingress/templates/controller-service.yaml +++ b/stable/nginx-ingress/templates/controller-service.yaml @@ -3,7 +3,7 @@ kind: Service metadata: {{- if .Values.controller.service.annotations }} annotations: -{{ toYaml .Values.controller.service.annotations | indent 4}} +{{ toYaml .Values.controller.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "name" . }} diff --git a/stable/nginx-ingress/templates/default-backend-deployment.yaml b/stable/nginx-ingress/templates/default-backend-deployment.yaml index 4663e50c18e1..c1b967cac7b0 100644 --- a/stable/nginx-ingress/templates/default-backend-deployment.yaml +++ b/stable/nginx-ingress/templates/default-backend-deployment.yaml @@ -15,7 +15,7 @@ spec: metadata: {{- if .Values.defaultBackend.podAnnotations }} annotations: -{{ toYaml .Values.defaultBackend.podAnnotations | indent 8}} +{{ toYaml .Values.defaultBackend.podAnnotations | indent 8 }} {{- end }} labels: app: {{ template "name" . }} diff --git a/stable/opencart/Chart.yaml b/stable/opencart/Chart.yaml index 6c4b468067e3..86f75c381b3b 100644 --- a/stable/opencart/Chart.yaml +++ b/stable/opencart/Chart.yaml @@ -1,5 +1,5 @@ name: opencart -version: 0.4.8 +version: 0.4.9 description: A free and open source e-commerce platform for online merchants. It provides a professional and reliable foundation for a successful online store. keywords: - opencart diff --git a/stable/opencart/requirements.lock b/stable/opencart/requirements.lock index 39f0b13999aa..767276cae762 100644 --- a/stable/opencart/requirements.lock +++ b/stable/opencart/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:42.160889563-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:48.827411091-04:00 diff --git a/stable/opencart/requirements.yaml b/stable/opencart/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/opencart/requirements.yaml +++ b/stable/opencart/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/opencart/templates/NOTES.txt b/stable/opencart/templates/NOTES.txt index 3972a292fdce..6a3192c03263 100644 --- a/stable/opencart/templates/NOTES.txt +++ b/stable/opencart/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure OpenCart with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.opencart-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/openvpn/templates/config-openvpn.yaml b/stable/openvpn/templates/config-openvpn.yaml index 881ef582555a..2dc253a96faa 100644 --- a/stable/openvpn/templates/config-openvpn.yaml +++ b/stable/openvpn/templates/config-openvpn.yaml @@ -16,10 +16,10 @@ data: ./easyrsa init-pki echo "ca\n" | ./easyrsa build-ca nopass ./easyrsa build-server-full server nopass - ./easyrsa gen-dh + ./easyrsa gen-dh fi - - + + newClientCert.sh: |- #!/bin/bash EASY_RSA_LOC="/etc/openvpn/certs" @@ -48,7 +48,7 @@ data: EOF cat pki/$1.ovpn - + configure.sh: |- #!/bin/sh /etc/openvpn/setup/setup-certs.sh @@ -57,7 +57,7 @@ data: if [ ! -c /dev/net/tun ]; then mknod /dev/net/tun c 10 200 fi - + if [ "$DEBUG" == "1" ]; then echo ========== ${OVPN_CONFIG} ========== cat "${OVPN_CONFIG}" @@ -72,7 +72,7 @@ data: sed 's|OVPN_K8S_SEARCH|'"${SEARCH}"'|' -i /etc/openvpn/openvpn.conf sed 's|OVPN_K8S_DNS|'"${DNS}"'|' -i /etc/openvpn/openvpn.conf sed 's|NETWORK|'"${NETWORK}"'|' -i /etc/openvpn/openvpn.conf - + openvpn --config /etc/openvpn/openvpn.conf openvpn.conf: |- server {{ .Values.openvpn.OVPN_NETWORK }} {{ .Values.openvpn.OVPN_SUBNET }} @@ -81,24 +81,24 @@ data: ca /etc/openvpn/certs/pki/ca.crt cert /etc/openvpn/certs/pki/issued/server.crt dh /etc/openvpn/certs/pki/dh.pem - + key-direction 0 keepalive 10 60 persist-key persist-tun - - proto {{.Values.openvpn.OVPN_PROTO}} + + proto {{ .Values.openvpn.OVPN_PROTO }} port {{ .Values.service.internalPort }} dev tun0 status /tmp/openvpn-status.log - + user nobody group nogroup - + push "route NETWORK 255.255.240.0" {{ if (.Values.openvpn.OVPN_K8S_POD_NETWORK) (.Values.openvpn.OVPN_K8S_POD_SUBNET) }} - push "route {{ .Values.openvpn.OVPN_K8S_POD_NETWORK }} {{.Values.openvpn.OVPN_K8S_POD_SUBNET}}" -{{end}} + push "route {{ .Values.openvpn.OVPN_K8S_POD_NETWORK }} {{ .Values.openvpn.OVPN_K8S_POD_SUBNET }}" +{{ end }} push "dhcp-option DOMAIN OVPN_K8S_SEARCH" - push "dhcp-option DNS OVPN_K8S_DNS" \ No newline at end of file + push "dhcp-option DNS OVPN_K8S_DNS" diff --git a/stable/openvpn/templates/openvpn-deployment.yaml b/stable/openvpn/templates/openvpn-deployment.yaml index 11ef3e695b3b..de473514f913 100644 --- a/stable/openvpn/templates/openvpn-deployment.yaml +++ b/stable/openvpn/templates/openvpn-deployment.yaml @@ -3,9 +3,9 @@ kind: Deployment metadata: name: {{ template "fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" spec: replicas: {{ .Values.replicaCount }} @@ -14,9 +14,9 @@ spec: labels: app: {{ template "fullname" . 
}} type: openvpn - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" spec: containers: - name: {{ .Chart.Name }} @@ -32,11 +32,11 @@ spec: - NET_ADMIN resources: requests: - cpu: "{{.Values.resources.requests.cpu}}" - memory: "{{.Values.resources.requests.memory}}" + cpu: "{{ .Values.resources.requests.cpu }}" + memory: "{{ .Values.resources.requests.memory }}" limits: - cpu: "{{.Values.resources.limits.cpu}}" - memory: "{{.Values.resources.limits.memory}}" + cpu: "{{ .Values.resources.limits.cpu }}" + memory: "{{ .Values.resources.limits.memory }}" volumeMounts: - mountPath: /etc/openvpn/setup name: openvpn diff --git a/stable/orangehrm/Chart.yaml b/stable/orangehrm/Chart.yaml index 7f945ef8da82..0fcbc3a895a9 100644 --- a/stable/orangehrm/Chart.yaml +++ b/stable/orangehrm/Chart.yaml @@ -1,5 +1,5 @@ name: orangehrm -version: 0.4.9 +version: 0.4.10 appVersion: 3.3.3 description: OrangeHRM is a free HR management system that offers a wealth of modules to suit the needs of your business. keywords: diff --git a/stable/orangehrm/requirements.lock b/stable/orangehrm/requirements.lock index 7bd26529c711..77c4d40a38f0 100644 --- a/stable/orangehrm/requirements.lock +++ b/stable/orangehrm/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:42.493240041-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:49.270793583-04:00 diff --git a/stable/orangehrm/requirements.yaml b/stable/orangehrm/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/orangehrm/requirements.yaml +++ b/stable/orangehrm/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/osclass/Chart.yaml b/stable/osclass/Chart.yaml index e494d810861f..96852cb68d69 100644 --- a/stable/osclass/Chart.yaml +++ b/stable/osclass/Chart.yaml @@ -1,5 +1,5 @@ name: osclass -version: 0.4.4 +version: 0.4.5 appVersion: 3.7.3 description: Osclass is a php script that allows you to quickly create and manage your own free classifieds site. 
keywords: diff --git a/stable/osclass/requirements.lock b/stable/osclass/requirements.lock index 6a3af5177996..fea4e20439d4 100644 --- a/stable/osclass/requirements.lock +++ b/stable/osclass/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:42.831398714-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:49.704907344-04:00 diff --git a/stable/osclass/requirements.yaml b/stable/osclass/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/osclass/requirements.yaml +++ b/stable/osclass/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/osclass/templates/NOTES.txt b/stable/osclass/templates/NOTES.txt index 87b05edbe4c0..2ca23fd550f6 100644 --- a/stable/osclass/templates/NOTES.txt +++ b/stable/osclass/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure Osclass with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.osclass-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/owncloud/Chart.yaml b/stable/owncloud/Chart.yaml index 6b2ed6e909ae..f9e9cc14d329 100644 --- a/stable/owncloud/Chart.yaml +++ b/stable/owncloud/Chart.yaml @@ -1,5 +1,5 @@ name: owncloud -version: 0.4.10 +version: 0.4.11 description: A file sharing server that puts the control and security of your own data back into your hands. 
keywords: - owncloud diff --git a/stable/owncloud/requirements.lock b/stable/owncloud/requirements.lock index f32af8746fa9..c172c7ee5298 100644 --- a/stable/owncloud/requirements.lock +++ b/stable/owncloud/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:43.165443848-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:50.149824575-04:00 diff --git a/stable/owncloud/requirements.yaml b/stable/owncloud/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/owncloud/requirements.yaml +++ b/stable/owncloud/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/owncloud/templates/NOTES.txt b/stable/owncloud/templates/NOTES.txt index 3339a003f096..d602874ae058 100644 --- a/stable/owncloud/templates/NOTES.txt +++ b/stable/owncloud/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure ownCloud with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.owncloud1-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/phabricator/Chart.yaml b/stable/phabricator/Chart.yaml index c88dee73398a..69ee8e5d1fe3 100644 --- a/stable/phabricator/Chart.yaml +++ b/stable/phabricator/Chart.yaml @@ -1,5 +1,5 @@ name: phabricator -version: 0.4.9 +version: 0.4.10 appVersion: 2017.23 description: Collection of open source web applications that help software companies build better software. 
keywords: diff --git a/stable/phabricator/requirements.lock b/stable/phabricator/requirements.lock index a29e329005a0..2882c1118d9f 100644 --- a/stable/phabricator/requirements.lock +++ b/stable/phabricator/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:43.483728614-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:50.625054114-04:00 diff --git a/stable/phabricator/requirements.yaml b/stable/phabricator/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/phabricator/requirements.yaml +++ b/stable/phabricator/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/phabricator/templates/NOTES.txt b/stable/phabricator/templates/NOTES.txt index 183ef003d736..918b7142f87f 100644 --- a/stable/phabricator/templates/NOTES.txt +++ b/stable/phabricator/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure Phabricator with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.phabricator-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/phpbb/requirements.lock b/stable/phpbb/requirements.lock index fe6dadb9c313..7394c105a41f 100644 --- a/stable/phpbb/requirements.lock +++ b/stable/phpbb/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:43.81673467-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:51.077407866-04:00 diff --git a/stable/phpbb/requirements.yaml b/stable/phpbb/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/phpbb/requirements.yaml +++ b/stable/phpbb/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/prestashop/Chart.yaml b/stable/prestashop/Chart.yaml index ff5555b57c62..7a515eb6cba7 100644 --- a/stable/prestashop/Chart.yaml +++ b/stable/prestashop/Chart.yaml @@ -1,5 +1,5 @@ name: prestashop -version: 0.4.10 +version: 0.4.11 appVersion: 1.7.1.2 description: A popular open source ecommerce solution. 
Professional tools are easily accessible to increase online sales including instant guest checkout, abandoned cart reminders and automated Email marketing. keywords: diff --git a/stable/prestashop/requirements.lock b/stable/prestashop/requirements.lock index 5f8abeb27fbe..e7a0a190d1bd 100644 --- a/stable/prestashop/requirements.lock +++ b/stable/prestashop/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:44.15934375-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:51.543620028-04:00 diff --git a/stable/prestashop/requirements.yaml b/stable/prestashop/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/prestashop/requirements.yaml +++ b/stable/prestashop/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/prestashop/templates/NOTES.txt b/stable/prestashop/templates/NOTES.txt index 3c75e9428134..df19714d59b6 100644 --- a/stable/prestashop/templates/NOTES.txt +++ b/stable/prestashop/templates/NOTES.txt @@ -18,7 +18,7 @@ host. To configure PrestaShop with the URL of your service: NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export APP_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") export APP_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath="{.data.prestashop-password}" | base64 --decode) {{- if .Values.mariadb.mariadbRootPassword }} export APP_DATABASE_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mariadb.fullname" . }} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) diff --git a/stable/prometheus/Chart.yaml b/stable/prometheus/Chart.yaml index c430e1390774..b10c0e4ac223 100755 --- a/stable/prometheus/Chart.yaml +++ b/stable/prometheus/Chart.yaml @@ -1,5 +1,5 @@ name: prometheus -version: 3.0.2 +version: 3.1.0 description: Prometheus is a monitoring system and time series database. home: https://prometheus.io/ icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png diff --git a/stable/prometheus/templates/alertmanager-deployment.yaml b/stable/prometheus/templates/alertmanager-deployment.yaml index 7ee93daeb0be..30ee9ba37da8 100644 --- a/stable/prometheus/templates/alertmanager-deployment.yaml +++ b/stable/prometheus/templates/alertmanager-deployment.yaml @@ -26,6 +26,11 @@ spec: - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} args: - --config.file=/etc/config/alertmanager.yml - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} diff --git a/stable/prometheus/templates/alertmanager-ingress.yaml b/stable/prometheus/templates/alertmanager-ingress.yaml index 2115233d366f..42e22165e4cf 100644 --- a/stable/prometheus/templates/alertmanager-ingress.yaml +++ b/stable/prometheus/templates/alertmanager-ingress.yaml @@ -7,7 +7,7 @@ kind: Ingress metadata: {{- if .Values.alertmanager.ingress.annotations }} annotations: -{{ toYaml .Values.alertmanager.ingress.annotations | indent 4}} +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . }} diff --git a/stable/prometheus/templates/alertmanager-service.yaml b/stable/prometheus/templates/alertmanager-service.yaml index a2854d9cb5ba..8eaebe05e095 100644 --- a/stable/prometheus/templates/alertmanager-service.yaml +++ b/stable/prometheus/templates/alertmanager-service.yaml @@ -4,7 +4,7 @@ kind: Service metadata: {{- if .Values.alertmanager.service.annotations }} annotations: -{{ toYaml .Values.alertmanager.service.annotations | indent 4}} +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . }} @@ -13,7 +13,7 @@ metadata: heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.alertmanager.service.labels }} -{{ toYaml .Values.alertmanager.service.labels | indent 4}} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} {{- end }} name: {{ template "prometheus.alertmanager.fullname" . }} spec: diff --git a/stable/prometheus/templates/kube-state-metrics-svc.yaml b/stable/prometheus/templates/kube-state-metrics-svc.yaml index 1738aa0f2901..e11fa06a4068 100644 --- a/stable/prometheus/templates/kube-state-metrics-svc.yaml +++ b/stable/prometheus/templates/kube-state-metrics-svc.yaml @@ -4,7 +4,7 @@ kind: Service metadata: {{- if .Values.kubeStateMetrics.service.annotations }} annotations: -{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4}} +{{ toYaml .Values.kubeStateMetrics.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . }} @@ -13,7 +13,7 @@ metadata: heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.kubeStateMetrics.service.labels }} -{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4}} +{{ toYaml .Values.kubeStateMetrics.service.labels | indent 4 }} {{- end }} name: {{ template "prometheus.kubeStateMetrics.fullname" . }} spec: diff --git a/stable/prometheus/templates/node-exporter-service.yaml b/stable/prometheus/templates/node-exporter-service.yaml index 085556935b23..6af14c26c498 100644 --- a/stable/prometheus/templates/node-exporter-service.yaml +++ b/stable/prometheus/templates/node-exporter-service.yaml @@ -4,7 +4,7 @@ kind: Service metadata: {{- if .Values.nodeExporter.service.annotations }} annotations: -{{ toYaml .Values.nodeExporter.service.annotations | indent 4}} +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . 
}} @@ -13,7 +13,7 @@ metadata: heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.nodeExporter.service.labels }} -{{ toYaml .Values.nodeExporter.service.labels | indent 4}} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} {{- end }} name: {{ template "prometheus.nodeExporter.fullname" . }} spec: diff --git a/stable/prometheus/templates/server-ingress.yaml b/stable/prometheus/templates/server-ingress.yaml index 0cb435f6b8ff..e6009d50141b 100644 --- a/stable/prometheus/templates/server-ingress.yaml +++ b/stable/prometheus/templates/server-ingress.yaml @@ -7,7 +7,7 @@ kind: Ingress metadata: {{- if .Values.server.ingress.annotations }} annotations: -{{ toYaml .Values.server.ingress.annotations | indent 4}} +{{ toYaml .Values.server.ingress.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . }} diff --git a/stable/prometheus/templates/server-service.yaml b/stable/prometheus/templates/server-service.yaml index 50c3e86b06d0..a56910b7eb86 100644 --- a/stable/prometheus/templates/server-service.yaml +++ b/stable/prometheus/templates/server-service.yaml @@ -3,7 +3,7 @@ kind: Service metadata: {{- if .Values.server.service.annotations }} annotations: -{{ toYaml .Values.server.service.annotations | indent 4}} +{{ toYaml .Values.server.service.annotations | indent 4 }} {{- end }} labels: app: {{ template "prometheus.name" . }} @@ -12,7 +12,7 @@ metadata: heritage: {{ .Release.Service }} release: {{ .Release.Name }} {{- if .Values.server.service.labels }} -{{ toYaml .Values.server.service.labels | indent 4}} +{{ toYaml .Values.server.service.labels | indent 4 }} {{- end }} name: {{ template "prometheus.server.fullname" . }} spec: diff --git a/stable/prometheus/values.yaml b/stable/prometheus/values.yaml index d8458571e64a..0dc510c04968 100644 --- a/stable/prometheus/values.yaml +++ b/stable/prometheus/values.yaml @@ -18,6 +18,11 @@ alertmanager: ## extraArgs: {} + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + ingress: ## If true, alertmanager Ingress will be created ## @@ -510,6 +515,12 @@ serverFiles: relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics # Scrape config for service endpoints. # diff --git a/stable/rabbitmq/README.md b/stable/rabbitmq/README.md index f14101352b8f..e459d61cd48b 100644 --- a/stable/rabbitmq/README.md +++ b/stable/rabbitmq/README.md @@ -86,7 +86,7 @@ $ helm install --name my-release -f values.yaml stable/rabbitmq The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/bitnami/rabbitmq` path of the container. -The chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. 
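Looking back at the Prometheus values.yaml hunk above, the new `alertmanager.extraEnv` map ships empty, with only a comment hinting at a use case ("for instance to add a http_proxy"). A minimal sketch of how that map might be populated is shown below; the proxy host and variable names are illustrative assumptions, and the deployment template simply renders each key/value pair as a container environment variable.

```yaml
# Illustrative use of the new alertmanager.extraEnv map (proxy endpoint is a made-up example)
alertmanager:
  extraEnv:
    HTTP_PROXY: "http://proxy.example.internal:3128"
    HTTPS_PROXY: "http://proxy.example.internal:3128"
    NO_PROXY: "kubernetes.default.svc"
```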
### Existing PersistentVolumeClaims diff --git a/stable/redis/Chart.yaml b/stable/redis/Chart.yaml index d25e593dcc2c..236878710171 100644 --- a/stable/redis/Chart.yaml +++ b/stable/redis/Chart.yaml @@ -1,5 +1,5 @@ name: redis -version: 0.7.1 +version: 0.8.0 appVersion: 3.2.9 description: Open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. keywords: diff --git a/stable/redis/README.md b/stable/redis/README.md index 7d8e5ac73aea..5cc280b57122 100644 --- a/stable/redis/README.md +++ b/stable/redis/README.md @@ -47,6 +47,7 @@ The following tables lists the configurable parameters of the Redis chart and th | -------------------------- | ------------------------------------- | --------------------------------------------------------- | | `image` | Redis image | `bitnami/redis:{VERSION}` | | `imagePullPolicy` | Image pull policy | `IfNotPresent` | +| `usePassword` | Use password | `true` | | `redisPassword` | Redis password | Randomly generated | | `persistence.enabled` | Use a PVC to persist data | `true` | | `persistence.existingClaim`| Use an existing PVC to persist data | `nil` | @@ -59,6 +60,8 @@ The following tables lists the configurable parameters of the Redis chart and th | `metrics.imageTag` | Exporter image | `v0.11` | | `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` | | `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | [] | The above parameters map to the env variables defined in [bitnami/redis](http://github.com/bitnami/bitnami-docker-redis). For more information please refer to the [bitnami/redis](http://github.com/bitnami/bitnami-docker-redis) image documentation. @@ -84,7 +87,7 @@ $ helm install --name my-release -f values.yaml stable/redis The [Bitnami Redis](https://github.com/bitnami/bitnami-docker-redis) image stores the Redis data and configurations at the `/bitnami/redis` path of the container. -By default, the chart mounts a [Persistent Volume](kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. +By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) volume at this location. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation. ### Existing PersistentVolumeClaim diff --git a/stable/redis/templates/deployment.yaml b/stable/redis/templates/deployment.yaml index d3b215086131..5591802dc32f 100644 --- a/stable/redis/templates/deployment.yaml +++ b/stable/redis/templates/deployment.yaml @@ -13,6 +13,14 @@ spec: labels: app: {{ template "fullname" . }} spec: + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} containers: - name: {{ template "fullname" . 
}} image: "{{ .Values.image }}" diff --git a/stable/redis/values.yaml b/stable/redis/values.yaml index 89e3f1daffa2..aedcf64fefd7 100644 --- a/stable/redis/values.yaml +++ b/stable/redis/values.yaml @@ -52,3 +52,9 @@ resources: requests: memory: 256Mi cpu: 100m + +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: {} +tolerations: [] diff --git a/stable/redmine/Chart.yaml b/stable/redmine/Chart.yaml index 09a8358ebc3a..b1e1baa25e73 100644 --- a/stable/redmine/Chart.yaml +++ b/stable/redmine/Chart.yaml @@ -1,5 +1,5 @@ name: redmine -version: 1.1.0 +version: 1.1.1 description: A flexible project management web application. keywords: - redmine diff --git a/stable/redmine/requirements.lock b/stable/redmine/requirements.lock index f204f484e84e..f2a74e81a097 100644 --- a/stable/redmine/requirements.lock +++ b/stable/redmine/requirements.lock @@ -5,7 +5,7 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 + version: 0.6.3 - condition: "" enabled: false import-values: null @@ -13,5 +13,5 @@ dependencies: repository: https://kubernetes-charts.storage.googleapis.com/ tags: null version: 0.7.1 -digest: sha256:dee3abb88198c7aacd63b3ea276a56c7c96665cd13f0e299eb883bec04e02f50 -generated: 2017-06-21T11:53:59.892629329-04:00 +digest: sha256:f9eca798ba947691e7a1cfba05808f8b3c724bb2bfcfece490d271fe551d63f4 +generated: 2017-06-22T19:36:52.056642011-04:00 diff --git a/stable/redmine/requirements.yaml b/stable/redmine/requirements.yaml index db782c256388..91a5f2c8c1dd 100644 --- a/stable/redmine/requirements.yaml +++ b/stable/redmine/requirements.yaml @@ -1,6 +1,6 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ condition: databaseType.mariadb - name: postgresql diff --git a/stable/redmine/templates/NOTES.txt b/stable/redmine/templates/NOTES.txt index 739110d7c689..a6fdd9802862 100644 --- a/stable/redmine/templates/NOTES.txt +++ b/stable/redmine/templates/NOTES.txt @@ -12,7 +12,7 @@ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "fullname" . }}' - export SERVICE_IP=$(kubectl get svc {{ template "fullname" . }} --namespace {{ .Release.Namespace }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") echo http://$SERVICE_IP/ {{- else if .Values.ingress.enabled }} diff --git a/stable/redmine/templates/svc.yaml b/stable/redmine/templates/svc.yaml index e83c16d9f83d..e3ab12b5e4bd 100644 --- a/stable/redmine/templates/svc.yaml +++ b/stable/redmine/templates/svc.yaml @@ -15,6 +15,6 @@ spec: targetPort: http selector: app: {{ template "fullname" . 
}} - {{if eq .Values.serviceType "LoadBalancer"}} - loadBalancerSourceRanges: {{.Values.serviceLoadBalancerSourceRanges}} - {{end}} + {{ if eq .Values.serviceType "LoadBalancer" }} + loadBalancerSourceRanges: {{ .Values.serviceLoadBalancerSourceRanges }} + {{ end }} diff --git a/stable/rethinkdb/templates/rethinkdb-cluster-stateful-set.yaml b/stable/rethinkdb/templates/rethinkdb-cluster-stateful-set.yaml index 1a90bbf1e59f..2e58315ae53b 100644 --- a/stable/rethinkdb/templates/rethinkdb-cluster-stateful-set.yaml +++ b/stable/rethinkdb/templates/rethinkdb-cluster-stateful-set.yaml @@ -15,12 +15,12 @@ spec: name: "{{ template "fullname" . }}-cluster" labels: app: "{{ template "name" . }}-cluster" - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" annotations: {{- if .Values.cluster.podAnnotations }} -{{ toYaml .Values.cluster.podAnnotations | indent 8}} +{{ toYaml .Values.cluster.podAnnotations | indent 8 }} {{- end }} spec: containers: diff --git a/stable/rethinkdb/templates/rethinkdb-proxy-deployment.yaml b/stable/rethinkdb/templates/rethinkdb-proxy-deployment.yaml index 17da550fbb3f..662ef25deca8 100644 --- a/stable/rethinkdb/templates/rethinkdb-proxy-deployment.yaml +++ b/stable/rethinkdb/templates/rethinkdb-proxy-deployment.yaml @@ -14,12 +14,12 @@ spec: name: {{ template "fullname" . }}-proxy labels: app: {{ template "name" . }}-proxy - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" annotations: {{- if .Values.proxy.podAnnotations }} -{{ toYaml .Values.proxy.podAnnotations | indent 8}} +{{ toYaml .Values.proxy.podAnnotations | indent 8 }} {{- end }} spec: containers: diff --git a/stable/risk-advisor/.helmignore b/stable/risk-advisor/.helmignore new file mode 100644 index 000000000000..f0c131944441 --- /dev/null +++ b/stable/risk-advisor/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/stable/risk-advisor/Chart.yaml b/stable/risk-advisor/Chart.yaml new file mode 100644 index 000000000000..506b69470431 --- /dev/null +++ b/stable/risk-advisor/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +name: risk-advisor +description: Risk Advisor add-on module for Kubernetes +version: 1.0.0 +sources: +- https://github.com/Prytu/risk-advisor/ +maintainers: + - name: pposkrobko + email: pawel.poskrobko@icloud.com + - name: Prytu + email: mich.pryt@gmail.com + - name: GraczykowskiMichal + email: graczykowskimichal@gmail.com + - name: jackmax + email: jacek.maksymowicz@student.uw.edu.pl +icon: https://raw.githubusercontent.com/Prytu/risk-advisor/master/logo.svg diff --git a/stable/risk-advisor/README.md b/stable/risk-advisor/README.md new file mode 100644 index 000000000000..0a93bd8c06da --- /dev/null +++ b/stable/risk-advisor/README.md @@ -0,0 +1,62 @@ +# risk-advisor +Risk advisor module for Kubernetes. 
This project is licensed under the terms of the Apache 2.0 license. + +It allows you to check how the cluster state would change if the request to create the provided pods was accepted by Kubernetes. + +## TL;DR + +```console +$ helm install stable/risk-advisor +``` + +## Introduction +This is a tool for operators of large Kubernetes clusters to help them foresee how adding new pods will change the cluster state, especially which nodes they will be scheduled on and whether there are enough resources in the cluster. + +## Prerequisites + - Kubernetes 1.5; no guarantees for other versions, although it should work properly + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/risk-advisor +``` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the risk-advisor chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`replicaCount` | desired number of risk-advisor pods | `1` +`image.repository` | risk-advisor container image repository | `pposkrobko/risk-advisor` +`image.tag` | risk-advisor container image tag | `v1.0.0` +`image.pullPolicy` | risk-advisor container image pull policy | `IfNotPresent` +`service.type` | service type | `NodePort` +`service.targetPort` | exposed port of risk-advisor pod | `9997` +`service.port` | in-cluster exposed port of risk-advisor service | `9997` +`service.nodePort` | exposed external port accessible from outside the cluster | `31111` + +## Usage + +The chart exposes a service with a REST API on port 11111, accepting the following endpoints: + * `/advise`: + * Accepts: a JSON array containing pod definitions + * Returns: a JSON array of scheduling results. Each result contains: + * `podName`: (string) Name of the relevant pod + * `result`: (string) `Scheduled` if the pod would be successfully scheduled, `failedScheduling` otherwise + * `message`: (string) Additional information about the result (e.g. nodes which were tried, or the reason why scheduling failed) + * `/healthz`: Health check endpoint; responds with HTTP 200 if successful + + diff --git a/stable/risk-advisor/templates/NOTES.txt b/stable/risk-advisor/templates/NOTES.txt new file mode 100644 index 000000000000..799b119e5405 --- /dev/null +++ b/stable/risk-advisor/templates/NOTES.txt @@ -0,0 +1,14 @@ +The risk advisor has been installed. Risk Advisor lets you simulate adding new pods to the cluster, showing whether and on which node they would be scheduled if this were a real create-pod request. It should help operators of large clusters predict changes in cluster state. To run a simulation, send a request to the `/advise` endpoint of the risk advisor service with a JSON array containing the pod definitions. + +To get the risk advisor service URL, run these commands: + +{{- if (not (empty .Values.service.nodePort)) }} + export NODE_PORT={{ .Values.service.nodePort }} +{{- else }} + export NODE_PORT=$(kubectl get services -o jsonpath="{.spec.ports[0].nodePort}" {{ template "fullname" . 
}}) +{{- end }} + export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[1].address}") + echo "Check how the cluster state would change after adding provided pods to the cluster by sending HTTP POST request to http://$NODE_IP:$NODE_PORT/advise with array of pod definitions" + echo "For example, using curl: curl -XPOST http://$NODE_IP:$NODE_PORT/advise -H \"Content-type: application/json\" -d @PODS_ARRAY_JSON_FILE" + +It may take a few seconds for risk advisor to respond to the request. diff --git a/stable/risk-advisor/templates/_helpers.tpl b/stable/risk-advisor/templates/_helpers.tpl new file mode 100644 index 000000000000..f0d83d2edba6 --- /dev/null +++ b/stable/risk-advisor/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/risk-advisor/templates/deployment.yaml b/stable/risk-advisor/templates/deployment.yaml new file mode 100644 index 000000000000..13b3108e8d75 --- /dev/null +++ b/stable/risk-advisor/templates/deployment.yaml @@ -0,0 +1,37 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.service.targetPort }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.service.targetPort }} + initialDelaySeconds: 3 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.service.targetPort }} + initialDelaySeconds: 3 + periodSeconds: 3 + resources: +{{ toYaml .Values.resources | indent 12 }} diff --git a/stable/risk-advisor/templates/service.yaml b/stable/risk-advisor/templates/service.yaml new file mode 100644 index 000000000000..5167b8a345c4 --- /dev/null +++ b/stable/risk-advisor/templates/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: {{ .Values.service.type }} + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + nodePort: {{ .Values.service.nodePort }} + selector: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" diff --git a/stable/risk-advisor/values.yaml b/stable/risk-advisor/values.yaml new file mode 100644 index 000000000000..ffb72c8f22d4 --- /dev/null +++ b/stable/risk-advisor/values.yaml @@ -0,0 +1,21 @@ +# Default values for risk-advisor. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. +replicaCount: 1 +image: + repository: pposkrobko/risk-advisor + tag: v1.0.0 + pullPolicy: IfNotPresent +service: + type: NodePort + port: 9997 + targetPort: 9997 + nodePort: 31111 +# resources: +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + diff --git a/stable/rocketchat/.helmignore b/stable/rocketchat/.helmignore new file mode 100644 index 000000000000..6b8710a711f3 --- /dev/null +++ b/stable/rocketchat/.helmignore @@ -0,0 +1 @@ +.git diff --git a/stable/rocketchat/Chart.yaml b/stable/rocketchat/Chart.yaml new file mode 100644 index 000000000000..8989edb9bf14 --- /dev/null +++ b/stable/rocketchat/Chart.yaml @@ -0,0 +1,22 @@ +name: rocketchat +version: 0.0.1 +description: Prepare to take off with the ultimate chat platform, experience the next level of team communications +keywords: +- chat +- communication +- http +- web +- application +- nodejs +- javascript +- meteor +home: https://rocket.chat/ +icon: https://cdn-www.rocket.chat/images/logo/logo.svg +sources: +- https://github.com/RocketChat/Docker.Official.Image/ +maintainers: +- name: RocketChat + email: buildmaster@rocket.chat +- name: geekgonecrazy +- name: pierreozoux +engine: gotpl diff --git a/stable/rocketchat/requirements.lock b/stable/rocketchat/requirements.lock new file mode 100644 index 000000000000..887787c99310 --- /dev/null +++ b/stable/rocketchat/requirements.lock @@ -0,0 +1,11 @@ +dependencies: +- alias: "" + condition: "" + enabled: false + import-values: null + name: mongodb + repository: https://kubernetes-charts.storage.googleapis.com/ + tags: null + version: 0.4.7 +digest: sha256:e76c2327ca138151bcc776ae4b46197069026fb14f1c2386e446f91f5488593b +generated: 2017-06-29T11:26:54.116252379-05:00 diff --git a/stable/rocketchat/requirements.yaml b/stable/rocketchat/requirements.yaml new file mode 100644 index 000000000000..c609c3ae7ad8 --- /dev/null +++ b/stable/rocketchat/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: +- name: mongodb + version: 0.4.7 + repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/rocketchat/templates/NOTES.txt b/stable/rocketchat/templates/NOTES.txt new file mode 100644 index 000000000000..c81cb5701010 --- /dev/null +++ b/stable/rocketchat/templates/NOTES.txt @@ -0,0 +1,27 @@ +Rocket.Chat can be accessed via port 80 on the following DNS name from within your cluster: + +- http://{{ template "fullname" . }}.{{ .Release.Namespace }} + +You can easily connect to the remote instance from your browser. Forward the webserver port to localhost:8888 + +- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . }},release={{ .Release.Name }}" -o jsonpath='{ .items[0].metadata.name }') 8888:3000 + +You can also connect to the container running Rocket.Chat. To open a shell session in the pod run the following: + +- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . }},release={{ .Release.Name }}" -o jsonpath='{.items[0].metadata.name}') /bin/sh + +To trail the logs for the Rocket.Chat pod run the following: + +- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . 
}},release={{ .Release.Name }}" -o jsonpath='{ .items[0].metadata.name }') + +{{- if .Values.ingress.enabled }} + +Rocket.Chat will be available at the URL: + +- http://{{ .Values.host }} +{{ else }} + +To expose Rocket.Chat via an Ingress you need to set host and enable ingress. + +helm install --set host=chat.yourdomain.com --set ingress.enabled=true stable/rocketchat +{{- end -}} diff --git a/stable/rocketchat/templates/_helpers.tpl b/stable/rocketchat/templates/_helpers.tpl new file mode 100644 index 000000000000..7610d1e4beb5 --- /dev/null +++ b/stable/rocketchat/templates/_helpers.tpl @@ -0,0 +1,24 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "mongodb.fullname" -}} +{{- printf "%s-%s" .Release.Name "mongodb" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/rocketchat/templates/deployment.yaml b/stable/rocketchat/templates/deployment.yaml new file mode 100644 index 000000000000..8aecbf9d8fd4 --- /dev/null +++ b/stable/rocketchat/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + replicas: 1 + template: + metadata: + labels: + app: {{ template "name" . }} + release: "{{ .Release.Name }}" + spec: + containers: + - name: {{ template "fullname" . }} + image: "{{ .Values.image }}" + imagePullPolicy: {{ default "" .Values.imagePullPolicy | quote }} + env: + - name: DEPLOY_PLATFORM + value: helm-chart + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MONGO_URL + {{ if .Values.mongodb.mongodbUsername }} + valueFrom: + secretKeyRef: + name: {{ template "fullname" . }} + key: mongo-uri + {{ else }} + value: mongodb://{{ template "mongodb.fullname" . }}:27017/rocketchat + {{ end }} + {{ if .Values.host }} + - name: ROOT_URL + value: https://{{ .Values.host }} + {{ end }} + - name: MAIL_URL + valueFrom: + secretKeyRef: + name: {{ template "fullname" . }} + key: mail-url + {{- range $key, $value := .Values.config }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + ports: + - name: http + containerPort: 3000 + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 20 + timeoutSeconds: 5 + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + - name: rocket-data + mountPath: /app/uploads + volumes: + - name: rocket-data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ template "fullname" . 
}} + {{- else }} + emptyDir: {} + {{- end }} + diff --git a/stable/rocketchat/templates/ingress.yaml b/stable/rocketchat/templates/ingress.yaml new file mode 100644 index 000000000000..8248b2f15acc --- /dev/null +++ b/stable/rocketchat/templates/ingress.yaml @@ -0,0 +1,31 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- if .Values.ingress.tls }} + kubernetes.io/tls-acme: "true" +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + - hosts: + - {{ .Values.host }} + secretName: {{ template "fullname" . }}-tls +{{- end }} + rules: + - host: {{ .Values.host }} + http: + paths: + - path: / + backend: + serviceName: {{ template "fullname" . }} + servicePort: 3000 +{{- end -}} diff --git a/stable/rocketchat/templates/pvc.yaml b/stable/rocketchat/templates/pvc.yaml new file mode 100644 index 000000000000..ae1b04a1fb83 --- /dev/null +++ b/stable/rocketchat/templates/pvc.yaml @@ -0,0 +1,21 @@ +{{- if .Values.persistence.enabled -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + {{- if .Values.persistence.storageClass }} + volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }} + {{- end }} +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end -}} diff --git a/stable/rocketchat/templates/secrets.yaml b/stable/rocketchat/templates/secrets.yaml new file mode 100644 index 000000000000..39b5b0a6c295 --- /dev/null +++ b/stable/rocketchat/templates/secrets.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + mail-url: {{ printf "smtp://%s:%s@%s:%s" .Values.config.SMTP_Username .Values.config.SMTP_Password .Values.config.SMTP_Host .Values.config.SMTP_Port | b64enc | quote }} + {{ if .Values.mongodb.mongodbUsername }} + mongo-uri: {{ printf "mongodb://%s:%s@%s-mongodb:27017/%s" .Values.mongodb.mongodbUsername .Values.mongodb.mongodbPassword .Release.Name .Values.mongodb.mongodbDatabase | b64enc | quote }} + {{ end }} diff --git a/stable/rocketchat/templates/svc.yaml b/stable/rocketchat/templates/svc.yaml new file mode 100644 index 000000000000..adafe2550015 --- /dev/null +++ b/stable/rocketchat/templates/svc.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app: {{ template "name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + ports: + - name: http + port: 3000 + targetPort: http + selector: + app: {{ template "name" . 
}} + release: "{{ .Release.Name }}" diff --git a/stable/rocketchat/values.yaml b/stable/rocketchat/values.yaml new file mode 100644 index 000000000000..976ec48d0071 --- /dev/null +++ b/stable/rocketchat/values.yaml @@ -0,0 +1,85 @@ +## Rocket Chat image version +## ref: https://hub.docker.com/r/library/rocket.chat/tags/ +## +image: rocket.chat:0.56 + +## Specify a imagePullPolicy +## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' +## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images +## +# imagePullPolicy: + +## Host for the application +## +# host: + +# Main RocketChat configuration: +config: + SMTP_Host: + SMTP_Port: + SMTP_Username: + SMTP_Password: + From_Email: + Jitsi_Enabled: false + Jitsi_Domain: meet.jit.si + Jitsi_URL_Room_Prefix: RocketChat + Jitsi_Open_New_Window: false + Jitsi_Enable_Channels: false + Jitsi_Chrome_Extension: + WebRTC_Enable_Channel: false + WebRTC_Enable_Private: false + WebRTC_Enable_Direct: false + +## +## MongoDB chart configuration +## +mongodb: + ## MongoDB admin password + ### ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run + ### + ## mongodbRootPassword: + # + ## MongoDB custom user and database + ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run + ## + # mongodbUsername: + # mongodbPassword: + # mongodbDatabase: + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## If defined, volume.beta.kubernetes.io/storage-class: + ## + # storageClass: + accessMode: ReadWriteOnce + size: 8Gi + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: true + ## If defined, volume.beta.kubernetes.io/storage-class: + ## + # storageClass: + accessMode: ReadWriteOnce + size: 8Gi + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +# resources: +# requests: +# memory: 512Mi +# cpu: 300m + +## Configure the ingress object to hook into existing infastructure +### ref : http://kubernetes.io/docs/user-guide/ingress/ +### +ingress: + enabled: false + tls: false + annotations: + kubernetes.io/ingress.class: "nginx" diff --git a/stable/selenium/Chart.yaml b/stable/selenium/Chart.yaml index 984e1f2ce609..b525dec47565 100644 --- a/stable/selenium/Chart.yaml +++ b/stable/selenium/Chart.yaml @@ -1,5 +1,6 @@ name: selenium -version: 0.1.1 +version: 0.1.2 +appVersion: 3.4.0 description: Chart for selenium grid keywords: - qa @@ -8,6 +9,6 @@ icon: http://docs.seleniumhq.org/images/big-logo.png sources: - https://github.com/SeleniumHQ/docker-selenium maintainers: - - name: Philip Champon (flah00) + - name: flah00 email: techops@adaptly.com engine: gotpl diff --git a/stable/selenium/templates/hub-deployment.yaml b/stable/selenium/templates/hub-deployment.yaml index 64c7fb5c3fc9..6e8c5fd6af45 100644 --- a/stable/selenium/templates/hub-deployment.yaml +++ b/stable/selenium/templates/hub-deployment.yaml @@ -37,6 +37,7 @@ spec: httpGet: path: /grid/console port: {{ .Values.hub.port }} + timeoutSeconds: {{ .Values.hub.readinessTimeout }} env: - name: JAVA_OPTS value: {{ default "" .Values.hub.javaOpts | quote }} diff --git a/stable/selenium/values.yaml b/stable/selenium/values.yaml index 4feaf4ba6d07..8fd406399b26 100644 --- 
a/stable/selenium/values.yaml +++ b/stable/selenium/values.yaml @@ -5,7 +5,7 @@ hub: ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ - tag: "2.53.1" + tag: "3.4.0" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -14,6 +14,9 @@ hub: ## The port which the hub listens on port: 4444 + ## Timeout for probe Hub readiness via HTTP request on Hub console + readinessTimeout: 1 + ## Set the JAVA_OPTS environment variable ## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC javaOpts: "-Xmx400m" @@ -79,7 +82,7 @@ chrome: ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ - tag: "2.53.1" + tag: "3.4.0" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -150,7 +153,7 @@ chromeDebug: ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ - tag: "2.53.1" + tag: "3.4.0" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -221,7 +224,7 @@ firefox: ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ - tag: "2.53.1" + tag: "3.4.0" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -279,7 +282,7 @@ firefoxDebug: ## The tag for the image ## ref: https://hub.docker.com/r/selenium/hub/tags/ - tag: "2.53.1" + tag: "3.4.0" ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images diff --git a/stable/sensu/templates/deployment.yaml b/stable/sensu/templates/deployment.yaml index 9e6f044ba96f..db5c0619efad 100644 --- a/stable/sensu/templates/deployment.yaml +++ b/stable/sensu/templates/deployment.yaml @@ -1,13 +1,13 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: {{template "fullname" . }} + name: {{ template "fullname" . }} labels: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" spec: - replicas: {{ .Values.replicaCount }} + replicas: {{ .Values.replicaCount }} template: metadata: labels: @@ -26,45 +26,45 @@ spec: - name: API_HOST value: localhost - name: API_PORT - value: '4567' - - name: REDIS_HOST + value: '4567' + - name: REDIS_HOST value: {{ template "redis.fullname" . }} - name: REDIS_PASSWORD valueFrom: secretKeyRef: name: {{ template "redis.fullname" . }} - key: redis-password - - name: REDIS_DB + key: redis-password + - name: REDIS_DB value: {{ .Values.REDIS_DB | quote }} - - name: REDIS_AUTO_RECONNECT - value: {{ .Values.REDIS_AUTO_RECONNECT | quote}} - - name: REDIS_RECONNECT_ON_ERROR + - name: REDIS_AUTO_RECONNECT + value: {{ .Values.REDIS_AUTO_RECONNECT | quote }} + - name: REDIS_RECONNECT_ON_ERROR value: {{ .Values.REDIS_RECONNECT_ON_ERROR | quote }} - - name: REDIS_PORT - value: {{ .Values.REDIS_PORT | quote }} + - name: REDIS_PORT + value: {{ .Values.REDIS_PORT | quote }} - name: api image: "{{ .Values.image }}:{{ .Values.imageTag }}" imagePullPolicy: {{ .Values.pullPolicy }} args: - - api + - api resources: {{ toYaml .Values.api.resources | indent 10 }} env: - - name: REDIS_HOST + - name: REDIS_HOST value: {{ template "redis.fullname" . }} - name: REDIS_PASSWORD valueFrom: secretKeyRef: name: {{ template "redis.fullname" . 
}} - key: redis-password - - name: REDIS_DB + key: redis-password + - name: REDIS_DB value: {{ .Values.REDIS_DB | quote }} - - name: REDIS_AUTO_RECONNECT - value: {{ .Values.REDIS_AUTO_RECONNECT | quote}} - - name: REDIS_RECONNECT_ON_ERROR + - name: REDIS_AUTO_RECONNECT + value: {{ .Values.REDIS_AUTO_RECONNECT | quote }} + - name: REDIS_RECONNECT_ON_ERROR value: {{ .Values.REDIS_RECONNECT_ON_ERROR | quote }} - - name: REDIS_PORT - value: {{ .Values.REDIS_PORT | quote }} + - name: REDIS_PORT + value: {{ .Values.REDIS_PORT | quote }} ports: - containerPort: 4567 readinessProbe: @@ -78,7 +78,7 @@ spec: path: /info port: 4567 initialDelaySeconds: 30 - timeoutSeconds: 1 + timeoutSeconds: 1 diff --git a/stable/sensu/templates/svc.yaml b/stable/sensu/templates/svc.yaml index 1d9efbd85369..ca1498899993 100644 --- a/stable/sensu/templates/svc.yaml +++ b/stable/sensu/templates/svc.yaml @@ -10,11 +10,11 @@ metadata: router.deis.io/routable: "true" annotations: router.deis.io/domains: {{ .Values.deis.domains | quote }} - {{end}} + {{ end }} spec: - type: {{.Values.serviceType }} + type: {{ .Values.serviceType }} ports: - port: {{ .Values.httpPort }} targetPort: 4567 selector: - app: {{ template "fullname" . }} \ No newline at end of file + app: {{ template "fullname" . }} diff --git a/stable/sentry/Chart.yaml b/stable/sentry/Chart.yaml index fbff726bba9e..61706ce1ba49 100644 --- a/stable/sentry/Chart.yaml +++ b/stable/sentry/Chart.yaml @@ -8,7 +8,7 @@ keywords: sources: - https://github.com/getsentry/sentry home: https://sentry.io/ -icon: https://a0wx592cvgzripj.global.ssl.fastly.net/_static/6571516f8aed42e4172c9c439ba814c6/getsentry/images/branding/png/sentry-glyph-black.png +icon: https://sentry.io/_static/getsentry/images/branding/png/sentry-glyph-black.png maintainers: - name: rothgar email: justin@linux.com diff --git a/stable/sentry/README.md b/stable/sentry/README.md index 6e90f7ffe679..f8eed410010f 100644 --- a/stable/sentry/README.md +++ b/stable/sentry/README.md @@ -5,7 +5,7 @@ ## TL;DR; ```console -$ helm install --wait incubator/sentry +$ helm install --wait stable/sentry ``` ## Introduction @@ -19,6 +19,7 @@ It also packages the [PostgreSQL](https://github.com/kubernetes/charts/tree/mast ## Prerequisites - Kubernetes 1.4+ with Beta APIs enabled +- helm >= v2.3.0 to run "weighted" hooks in right order. 
- PV provisioner support in the underlying infrastructure (with persistence storage enabled) ## Installing the Chart @@ -26,7 +27,7 @@ It also packages the [PostgreSQL](https://github.com/kubernetes/charts/tree/mast To install the chart with the release name `my-release`: ```console -$ helm install --name my-release --wait incubator/sentry +$ helm install --name my-release --wait stable/sentry ``` > **Note**: We have to use the --wait flag for initial creation because the database creationg takes longer than the default 300 seconds @@ -74,12 +75,13 @@ The following tables lists the configurable parameters of the Sentry chart and t | `service.externalPort` | Kubernetes external service port | `9000` | | `service.internalPort` | Kubernetes internal service port | `9000` | | `ingress.enabled` | Enable ingress controller resource | `false` | -| `ingress.hostname` | URL to address your Sentry installation | `sentry.local` | -| `ingress.tls` | Ingress TLS configuration | `[]` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.hostname` | URL to address your Sentry installation | `sentry.local` | +| `ingress.tls` | Ingress TLS configuration | `[]` | | `persistence.enabled` | Enable persistence using PVC | `true` | | `persistence.storageClass` | PVC Storage Class | `nil` (uses alpha storage class annotation) | | `persistence.accessMode` | PVC Access Mode | `ReadWriteOnce` | -| `persistence.size` | PVC Storage Request | `10Gi` | | +| `persistence.size` | PVC Storage Request | `10Gi` | Dependent charts can also have values overwritten. Preface values with postgresql.* or redis.* @@ -88,13 +90,13 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm ```console $ helm install --name my-release \ --set persistence.enabled=false,email.host=email \ - incubator/sentry + stable/sentry ``` Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, ```console -$ helm install --name my-release -f values.yaml incubator/sentry +$ helm install --name my-release -f values.yaml stable/sentry ``` > **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/stable/sentry/templates/cron-deployment.yaml b/stable/sentry/templates/cron-deployment.yaml index 065fd11fd24a..0656d5c9622f 100644 --- a/stable/sentry/templates/cron-deployment.yaml +++ b/stable/sentry/templates/cron-deployment.yaml @@ -39,7 +39,7 @@ spec: name: {{ template "postgresql.fullname" . }} key: postgres-password - name: SENTRY_POSTGRES_HOST - value: {{ template "postgresql.fullname" .}} + value: {{ template "postgresql.fullname" . }} - name: SENTRY_POSTRGES_PORT value: "5432" - name: SENTRY_REDIS_PASSWORD @@ -48,7 +48,7 @@ spec: name: {{ template "redis.fullname" . }} key: redis-password - name: SENTRY_REDIS_HOST - value: {{ template "redis.fullname" .}} + value: {{ template "redis.fullname" . }} - name: SENTRY_REDIS_PORT value: "6379" - name: SENTRY_EMAIL_HOST diff --git a/stable/sentry/templates/hooks/db-init.job.yaml b/stable/sentry/templates/hooks/db-init.job.yaml index 928130323440..85352d40faf9 100644 --- a/stable/sentry/templates/hooks/db-init.job.yaml +++ b/stable/sentry/templates/hooks/db-init.job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-db-init" + name: "{{ .Release.Name }}-db-init" labels: app: {{ template "fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" @@ -16,7 +16,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}-db-init" + name: "{{ .Release.Name }}-db-init" labels: app: {{ template "fullname" . }} release: "{{ .Release.Name }}" @@ -42,7 +42,7 @@ spec: name: {{ template "postgresql.fullname" . }} key: postgres-password - name: SENTRY_POSTGRES_HOST - value: {{ template "postgresql.fullname" .}} + value: {{ template "postgresql.fullname" . }} - name: SENTRY_POSTRGES_PORT value: "5432" - name: SENTRY_REDIS_PASSWORD @@ -51,7 +51,7 @@ spec: name: {{ template "redis.fullname" . }} key: redis-password - name: SENTRY_REDIS_HOST - value: {{ template "redis.fullname" .}} + value: {{ template "redis.fullname" . }} - name: SENTRY_REDIS_PORT value: "6379" - name: SENTRY_EMAIL_HOST diff --git a/stable/sentry/templates/hooks/user-create.job.yaml b/stable/sentry/templates/hooks/user-create.job.yaml index d0240dd12976..80f631897740 100644 --- a/stable/sentry/templates/hooks/user-create.job.yaml +++ b/stable/sentry/templates/hooks/user-create.job.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-user-create" + name: "{{ .Release.Name }}-user-create" labels: app: {{ template "fullname" . }} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" @@ -15,7 +15,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}-user-create" + name: "{{ .Release.Name }}-user-create" labels: app: {{ template "fullname" . }} release: "{{ .Release.Name }}" @@ -41,7 +41,7 @@ spec: name: {{ template "postgresql.fullname" . }} key: postgres-password - name: SENTRY_POSTGRES_HOST - value: {{ template "postgresql.fullname" .}} + value: {{ template "postgresql.fullname" . }} - name: SENTRY_POSTRGES_PORT value: "5432" - name: SENTRY_REDIS_PASSWORD @@ -50,7 +50,7 @@ spec: name: {{ template "redis.fullname" . }} key: redis-password - name: SENTRY_REDIS_HOST - value: {{ template "redis.fullname" .}} + value: {{ template "redis.fullname" . }} - name: SENTRY_REDIS_PORT value: "6379" - name: SENTRY_EMAIL_HOST diff --git a/stable/sentry/templates/ingress.yaml b/stable/sentry/templates/ingress.yaml index ef7c7b872240..a8ac2945246e 100644 --- a/stable/sentry/templates/ingress.yaml +++ b/stable/sentry/templates/ingress.yaml @@ -20,7 +20,7 @@ spec: - path: / backend: serviceName: {{ template "fullname" . }} - servicePort: 80 + servicePort: {{ .Values.service.externalPort }} {{- if .Values.ingress.tls }} tls: {{ toYaml .Values.ingress.tls | indent 4 }} diff --git a/stable/sentry/templates/web-deployment.yaml b/stable/sentry/templates/web-deployment.yaml index 5ef48db39203..39c678052ee8 100644 --- a/stable/sentry/templates/web-deployment.yaml +++ b/stable/sentry/templates/web-deployment.yaml @@ -38,7 +38,7 @@ spec: name: {{ template "postgresql.fullname" . }} key: postgres-password - name: SENTRY_POSTGRES_HOST - value: {{ template "postgresql.fullname" .}} + value: {{ template "postgresql.fullname" . }} - name: SENTRY_POSTRGES_PORT value: "5432" - name: SENTRY_REDIS_PASSWORD @@ -47,7 +47,7 @@ spec: name: {{ template "redis.fullname" . }} key: redis-password - name: SENTRY_REDIS_HOST - value: {{ template "redis.fullname" .}} + value: {{ template "redis.fullname" . 
}} - name: SENTRY_REDIS_PORT value: "6379" - name: SENTRY_EMAIL_HOST diff --git a/stable/sentry/templates/workers-deployment.yaml b/stable/sentry/templates/workers-deployment.yaml index d895e78dc78f..87285973bdfa 100644 --- a/stable/sentry/templates/workers-deployment.yaml +++ b/stable/sentry/templates/workers-deployment.yaml @@ -39,7 +39,7 @@ spec: name: {{ template "postgresql.fullname" . }} key: postgres-password - name: SENTRY_POSTGRES_HOST - value: {{ template "postgresql.fullname" .}} + value: {{ template "postgresql.fullname" . }} - name: SENTRY_POSTRGES_PORT value: "5432" - name: SENTRY_REDIS_PASSWORD @@ -48,7 +48,7 @@ spec: name: {{ template "redis.fullname" . }} key: redis-password - name: SENTRY_REDIS_HOST - value: {{ template "redis.fullname" .}} + value: {{ template "redis.fullname" . }} - name: SENTRY_REDIS_PORT value: "6379" - name: SENTRY_EMAIL_HOST diff --git a/stable/sentry/values.yaml b/stable/sentry/values.yaml index 1b11efc1d871..2767a0c452d3 100644 --- a/stable/sentry/values.yaml +++ b/stable/sentry/values.yaml @@ -89,13 +89,14 @@ ingress: ## Ingress annotations ## - # annotations: + annotations: {} # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' ## Ingress TLS configuration ## Secrets must be manually created in the namespace ## - # tls: + tls: [] # - secretName: sentry.local-tls # hosts: # - sentry.local diff --git a/stable/spark/templates/spark-master-deployment.yaml b/stable/spark/templates/spark-master-deployment.yaml index 2a526e22597d..d96c4db5a4e7 100644 --- a/stable/spark/templates/spark-master-deployment.yaml +++ b/stable/spark/templates/spark-master-deployment.yaml @@ -3,32 +3,32 @@ kind: Service metadata: name: {{ template "master-fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" spec: ports: - - port: {{.Values.Master.ServicePort}} - targetPort: {{.Values.Master.ContainerPort}} + - port: {{ .Values.Master.ServicePort }} + targetPort: {{ .Values.Master.ContainerPort }} selector: - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" --- apiVersion: v1 kind: Service metadata: name: {{ template "webui-fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" spec: ports: - - port: {{.Values.WebUi.ServicePort}} - targetPort: {{.Values.WebUi.ContainerPort}} + - port: {{ .Values.WebUi.ServicePort }} + targetPort: {{ .Values.WebUi.ContainerPort }} selector: - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" type: "LoadBalancer" --- apiVersion: extensions/v1beta1 @@ -36,37 +36,37 @@ kind: Deployment metadata: name: {{ template "master-fullname" . 
}} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" spec: - replicas: {{default 1 .Values.Master.Replicas}} + replicas: {{ default 1 .Values.Master.Replicas }} strategy: type: RollingUpdate selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Master.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Master.Component }}" spec: containers: - name: {{ template "master-fullname" . }} - image: "{{.Values.Master.Image}}:{{.Values.Master.ImageTag}}" + image: "{{ .Values.Master.Image }}:{{ .Values.Master.ImageTag }}" command: ["/bin/sh","-c"] - args: ["echo $(hostname -i) {{ template "master-fullname" . }} >> /etc/hosts; /opt/spark/bin/spark-class org.apache.spark.deploy.master.Master --ip {{ template "master-fullname" . }} --port {{.Values.Master.ServicePort}} --webui-port {{.Values.WebUi.ContainerPort}}"] + args: ["echo $(hostname -i) {{ template "master-fullname" . }} >> /etc/hosts; /opt/spark/bin/spark-class org.apache.spark.deploy.master.Master --ip {{ template "master-fullname" . }} --port {{ .Values.Master.ServicePort }} --webui-port {{ .Values.WebUi.ContainerPort }}"] ports: - - containerPort: {{.Values.Master.ContainerPort}} - - containerPort: {{.Values.WebUi.ContainerPort}} + - containerPort: {{ .Values.Master.ContainerPort }} + - containerPort: {{ .Values.WebUi.ContainerPort }} resources: requests: - cpu: "{{.Values.Master.Cpu}}" - memory: "{{.Values.Master.Memory}}" + cpu: "{{ .Values.Master.Cpu }}" + memory: "{{ .Values.Master.Memory }}" env: - name: SPARK_DAEMON_MEMORY value: {{ default "1g" .Values.Master.DaemonMemory | quote }} diff --git a/stable/spark/templates/spark-worker-deployment.yaml b/stable/spark/templates/spark-worker-deployment.yaml index 95010e1b14c7..0cd6689c5778 100644 --- a/stable/spark/templates/spark-worker-deployment.yaml +++ b/stable/spark/templates/spark-worker-deployment.yaml @@ -3,35 +3,35 @@ kind: Deployment metadata: name: {{ template "worker-fullname" . 
}} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Worker.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Worker.Component }}" spec: - replicas: {{default 1 .Values.Worker.Replicas}} + replicas: {{ default 1 .Values.Worker.Replicas }} strategy: type: RollingUpdate selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Worker.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Worker.Component }}" template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Worker.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Worker.Component }}" spec: containers: - name: {{ template "worker-fullname" . }} - image: "{{.Values.Worker.Image}}:{{.Values.Worker.ImageTag}}" - command: ["/opt/spark/bin/spark-class", "org.apache.spark.deploy.worker.Worker", "spark://{{ template "master-fullname" . }}:{{.Values.Master.ServicePort}}", "--webui-port", "{{.Values.Worker.ContainerPort}}"] + image: "{{ .Values.Worker.Image }}:{{ .Values.Worker.ImageTag }}" + command: ["/opt/spark/bin/spark-class", "org.apache.spark.deploy.worker.Worker", "spark://{{ template "master-fullname" . }}:{{ .Values.Master.ServicePort }}", "--webui-port", "{{ .Values.Worker.ContainerPort }}"] ports: - - containerPort: {{.Values.Worker.ContainerPort}} + - containerPort: {{ .Values.Worker.ContainerPort }} resources: requests: - cpu: "{{.Values.Worker.Cpu}}" - memory: "{{.Values.Worker.Memory}}" + cpu: "{{ .Values.Worker.Cpu }}" + memory: "{{ .Values.Worker.Memory }}" env: - name: SPARK_DAEMON_MEMORY value: {{ default "1g" .Values.Worker.DaemonMemory | quote }} diff --git a/stable/spark/templates/spark-zeppelin-deployment.yaml b/stable/spark/templates/spark-zeppelin-deployment.yaml index ce709e7b68a1..21dff346c07a 100644 --- a/stable/spark/templates/spark-zeppelin-deployment.yaml +++ b/stable/spark/templates/spark-zeppelin-deployment.yaml @@ -3,16 +3,16 @@ kind: Service metadata: name: {{ template "zeppelin-fullname" . }} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Zeppelin.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Zeppelin.Component }}" spec: ports: - - port: {{.Values.Zeppelin.ServicePort}} - targetPort: {{.Values.Zeppelin.ContainerPort}} + - port: {{ .Values.Zeppelin.ServicePort }} + targetPort: {{ .Values.Zeppelin.ContainerPort }} selector: - component: "{{.Release.Name}}-{{.Values.Zeppelin.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Zeppelin.Component }}" type: "LoadBalancer" --- apiVersion: extensions/v1beta1 @@ -20,32 +20,32 @@ kind: Deployment metadata: name: {{ template "zeppelin-fullname" . 
}} labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Zeppelin.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Zeppelin.Component }}" spec: - replicas: {{default 1 .Values.Zeppelin.Replicas}} + replicas: {{ default 1 .Values.Zeppelin.Replicas }} strategy: type: RollingUpdate selector: matchLabels: - component: "{{.Release.Name}}-{{.Values.Zeppelin.Component}}" + component: "{{ .Release.Name }}-{{ .Values.Zeppelin.Component }}" template: metadata: labels: - heritage: {{.Release.Service | quote }} - release: {{.Release.Name | quote }} - chart: "{{.Chart.Name}}-{{.Chart.Version}}" - component: "{{.Release.Name}}-{{.Values.Zeppelin.Component}}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + component: "{{ .Release.Name }}-{{ .Values.Zeppelin.Component }}" spec: containers: - name: {{ template "zeppelin-fullname" . }} - image: "{{.Values.Zeppelin.Image}}:{{.Values.Zeppelin.ImageTag}}" + image: "{{ .Values.Zeppelin.Image }}:{{ .Values.Zeppelin.ImageTag }}" command: ["/bin/sh","-c"] - args: ["sed -i.bak s/spark-master:7077/{{ template "master-fullname" . }}:{{.Values.Master.ServicePort}}/g /opt/zeppelin/conf/zeppelin-env.sh; /opt/zeppelin/bin/docker-zeppelin.sh"] + args: ["sed -i.bak s/spark-master:7077/{{ template "master-fullname" . }}:{{ .Values.Master.ServicePort }}/g /opt/zeppelin/conf/zeppelin-env.sh; /opt/zeppelin/bin/docker-zeppelin.sh"] ports: - - containerPort: {{.Values.Zeppelin.ContainerPort}} + - containerPort: {{ .Values.Zeppelin.ContainerPort }} resources: requests: - cpu: "{{.Values.Zeppelin.Cpu}}" + cpu: "{{ .Values.Zeppelin.Cpu }}" diff --git a/stable/spartakus/templates/deployment.yaml b/stable/spartakus/templates/deployment.yaml index 45c0ea191352..1eb744c3c132 100644 --- a/stable/spartakus/templates/deployment.yaml +++ b/stable/spartakus/templates/deployment.yaml @@ -13,7 +13,7 @@ spec: metadata: {{- if .Values.podAnnotations }} annotations: -{{ toYaml .Values.podAnnotations | indent 8}} +{{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} labels: app: {{ template "name" . }} diff --git a/stable/spinnaker/templates/configmap/jenkins-config.yaml b/stable/spinnaker/templates/configmap/jenkins-config.yaml index 7b04c4fef60d..bd99f8d2ff59 100644 --- a/stable/spinnaker/templates/configmap/jenkins-config.yaml +++ b/stable/spinnaker/templates/configmap/jenkins-config.yaml @@ -39,10 +39,10 @@ data: 2147483647 - {{.Values.jenkins.Agent.Cpu}} - {{.Values.jenkins.Agent.Memory}} - {{.Values.jenkins.Agent.Cpu}} - {{.Values.jenkins.Agent.Memory}} + {{ .Values.jenkins.Agent.Cpu }} + {{ .Values.jenkins.Agent.Memory }} + {{ .Values.jenkins.Agent.Cpu }} + {{ .Values.jenkins.Agent.Memory }} /usr/bin/docker diff --git a/stable/spinnaker/templates/configmap/s3-config.yaml b/stable/spinnaker/templates/configmap/s3-config.yaml index 75cfc2aaec33..e08f451a7613 100644 --- a/stable/spinnaker/templates/configmap/s3-config.yaml +++ b/stable/spinnaker/templates/configmap/s3-config.yaml @@ -3,7 +3,7 @@ kind: ConfigMap metadata: name: {{ template "fullname" . }}-s3-config labels: - app: {{template "fullname" .}} + app: {{ template "fullname" . 
}} data: credentials: | [default] diff --git a/stable/spinnaker/templates/configmap/spinnaker-config.yaml b/stable/spinnaker/templates/configmap/spinnaker-config.yaml index 29a6c23b22ff..95622751c363 100644 --- a/stable/spinnaker/templates/configmap/spinnaker-config.yaml +++ b/stable/spinnaker/templates/configmap/spinnaker-config.yaml @@ -108,17 +108,17 @@ data: challengeDestructiveActionsEnvironments: ${providers.aws.primaryCredentials.name}, ${providers.google.primaryCredentials.name}, ${providers.cf.primaryCredentials.name}, ${providers.azure.primaryCredentials.name} echo.yml: | -{{.Files.Get "config/echo.yml" | indent 4}} +{{ .Files.Get "config/echo.yml" | indent 4 }} echo-local.yml: | -{{.Files.Get "config/echo-local.yml" | indent 4}} +{{ .Files.Get "config/echo-local.yml" | indent 4 }} fiat.yml: | -{{.Files.Get "config/fiat.yml" | indent 4}} +{{ .Files.Get "config/fiat.yml" | indent 4 }} fiat.yml-local: | -{{.Files.Get "config/fiat-local.yml" | indent 4}} +{{ .Files.Get "config/fiat-local.yml" | indent 4 }} front50.yml: | -{{.Files.Get "config/front50.yml" | indent 4}} +{{ .Files.Get "config/front50.yml" | indent 4 }} front50-local.yml: | -{{.Files.Get "config/front50-local.yml" | indent 4}} +{{ .Files.Get "config/front50-local.yml" | indent 4 }} gate.yml: | server: port: ${services.gate.port:8084} @@ -153,23 +153,23 @@ data: redis: connection: ${services.redis.connection} igor.yml: | -{{.Files.Get "config/igor.yml" | indent 4}} +{{ .Files.Get "config/igor.yml" | indent 4 }} igor-local.yml: | -{{.Files.Get "config/igor-local.yml" | indent 4}} +{{ .Files.Get "config/igor-local.yml" | indent 4 }} orca.yml: | -{{.Files.Get "config/orca.yml" | indent 4}} +{{ .Files.Get "config/orca.yml" | indent 4 }} orca-local.yml: | -{{.Files.Get "config/orca-local.yml" | indent 4}} +{{ .Files.Get "config/orca-local.yml" | indent 4 }} rosco.yml: | -{{.Files.Get "config/rosco.yml" | indent 4}} +{{ .Files.Get "config/rosco.yml" | indent 4 }} rosco-local.yml: | -{{.Files.Get "config/rosco-local.yml" | indent 4}} +{{ .Files.Get "config/rosco-local.yml" | indent 4 }} settings.js: | 'use strict'; var feedbackUrl = 'http://localhost'; var gateHost = '{{ .Values.deck.protocol }}://{{ .Values.deck.host }}:{{ .Values.deck.port }}/gate'; - var bakeryDetailUrl = gateHost + '/bakery/logs/global/{{"{{"}}context.status.id{{"}}"}}'; + var bakeryDetailUrl = gateHost + '/bakery/logs/global/{{ "{{ " }}context.status.id{{ " }}" }}'; window.spinnakerSettings = { defaultProviders: ['kubernetes'], @@ -219,7 +219,7 @@ data: }, }; spinnaker.yml: | -{{.Files.Get "config/spinnaker.yml" | indent 4}} +{{ .Files.Get "config/spinnaker.yml" | indent 4 }} clouddriver-local.yml: | server: port: ${services.clouddriver.port:7002} diff --git a/stable/spinnaker/templates/deployments/igor.yaml b/stable/spinnaker/templates/deployments/igor.yaml index 0bac771d69b5..fcc28406bf3f 100644 --- a/stable/spinnaker/templates/deployments/igor.yaml +++ b/stable/spinnaker/templates/deployments/igor.yaml @@ -21,7 +21,7 @@ spec: pod.alpha.kubernetes.io/init-containers: '[ { "name": "copy-default-config", - "image": "{{.Values.jenkins.Master.Image}}:{{.Values.jenkins.Master.ImageTag}}", + "image": "{{ .Values.jenkins.Master.Image }}:{{ .Values.jenkins.Master.ImageTag }}", "command": ["cp", "-n", "/var/jenkins_config/config.xml", "/var/jenkins_home"], "imagePullPolicy": "IfNotPresent", "volumeMounts": [ @@ -53,20 +53,20 @@ spec: initialDelaySeconds: 20 timeoutSeconds: 1 - name: jenkins-master - image: 
"{{.Values.jenkins.Master.Image}}:{{.Values.jenkins.Master.ImageTag}}" - imagePullPolicy: "{{.Values.jenkins.Master.ImagePullPolicy}}" + image: "{{ .Values.jenkins.Master.Image }}:{{ .Values.jenkins.Master.ImageTag }}" + imagePullPolicy: "{{ .Values.jenkins.Master.ImagePullPolicy }}" env: - name: JAVA_OPTS - value: "{{ default "" .Values.jenkins.Master.JavaOpts}}" + value: "{{ default "" .Values.jenkins.Master.JavaOpts }}" ports: - - containerPort: {{.Values.jenkins.Master.ContainerPort}} + - containerPort: {{ .Values.jenkins.Master.ContainerPort }} name: http - - containerPort: {{.Values.jenkins.Master.SlaveListenerPort}} + - containerPort: {{ .Values.jenkins.Master.SlaveListenerPort }} name: slavelistener resources: requests: - cpu: "{{.Values.jenkins.Master.Cpu}}" - memory: "{{.Values.jenkins.Master.Memory}}" + cpu: "{{ .Values.jenkins.Master.Cpu }}" + memory: "{{ .Values.jenkins.Master.Memory }}" volumeMounts: - mountPath: /var/jenkins_home name: jenkins-home diff --git a/stable/spinnaker/templates/hooks/create-bucket.yaml b/stable/spinnaker/templates/hooks/create-bucket.yaml index 2d7253eef7d8..428a41e8459b 100644 --- a/stable/spinnaker/templates/hooks/create-bucket.yaml +++ b/stable/spinnaker/templates/hooks/create-bucket.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-create-bucket" + name: "{{ .Release.Name }}-create-bucket" labels: app: {{ template "fullname" . }} component: minio @@ -10,7 +10,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}" + name: "{{ .Release.Name }}" labels: app: {{ template "fullname" . }} component: minio @@ -22,4 +22,4 @@ spec: command: - sh - -c - - "mc config host add {{.Release.Name}}-minio http://{{.Release.Name}}-minio-svc:9000 {{ .Values.minio.accessKey }} {{ .Values.minio.secretKey }} S3v4 && mc mb {{.Release.Name}}-minio/spinnaker" + - "mc config host add {{ .Release.Name }}-minio http://{{ .Release.Name }}-minio-svc:9000 {{ .Values.minio.accessKey }} {{ .Values.minio.secretKey }} S3v4 && mc mb {{ .Release.Name }}-minio/spinnaker" diff --git a/stable/spinnaker/templates/hooks/delete-jobs.yaml b/stable/spinnaker/templates/hooks/delete-jobs.yaml index e1307372def2..369c13706f01 100644 --- a/stable/spinnaker/templates/hooks/delete-jobs.yaml +++ b/stable/spinnaker/templates/hooks/delete-jobs.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-delete-jobs" + name: "{{ .Release.Name }}-delete-jobs" labels: app: {{ template "fullname" . }} component: spinnaker @@ -10,7 +10,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}" + name: "{{ .Release.Name }}" labels: app: {{ template "fullname" . 
}} component: spinnaker @@ -30,4 +30,4 @@ spec: command: - sh - -c - - "kubectl delete job --namespace {{ .Release.Namespace }} {{.Release.Name}}-create-bucket {{.Release.Name}}-upload-build-image {{.Release.Name}}-delete-jobs {{.Release.Name}}-upload-run-script" + - "kubectl delete job --namespace {{ .Release.Namespace }} {{ .Release.Name }}-create-bucket {{ .Release.Name }}-upload-build-image {{ .Release.Name }}-delete-jobs {{ .Release.Name }}-upload-run-script" diff --git a/stable/spinnaker/templates/hooks/upload-build-image.yaml b/stable/spinnaker/templates/hooks/upload-build-image.yaml index cb987f14f818..d4aca348a950 100644 --- a/stable/spinnaker/templates/hooks/upload-build-image.yaml +++ b/stable/spinnaker/templates/hooks/upload-build-image.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-upload-build-image" + name: "{{ .Release.Name }}-upload-build-image" labels: app: {{ template "fullname" . }} component: jenkins @@ -10,7 +10,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}" + name: "{{ .Release.Name }}" labels: app: {{ template "fullname" . }} component: jenkins diff --git a/stable/spinnaker/templates/hooks/upload-run-script.yaml b/stable/spinnaker/templates/hooks/upload-run-script.yaml index 369357037014..c38c9e39fc50 100644 --- a/stable/spinnaker/templates/hooks/upload-run-script.yaml +++ b/stable/spinnaker/templates/hooks/upload-run-script.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: "{{.Release.Name}}-upload-run-script" + name: "{{ .Release.Name }}-upload-run-script" labels: app: {{ template "fullname" . }} component: jenkins @@ -10,7 +10,7 @@ metadata: spec: template: metadata: - name: "{{.Release.Name}}" + name: "{{ .Release.Name }}" labels: app: {{ template "fullname" . }} component: jenkins diff --git a/stable/stash/.helmignore b/stable/stash/.helmignore new file mode 100644 index 000000000000..f0c131944441 --- /dev/null +++ b/stable/stash/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/stable/stash/Chart.yaml b/stable/stash/Chart.yaml new file mode 100755 index 000000000000..0341c4219911 --- /dev/null +++ b/stable/stash/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +description: 'Stash by AppsCode - Backup your Kubernetes Volumes' +name: stash +version: 0.1.0 +appVersion: 0.3.1 +home: https://github.com/appscode/stash +icon: https://cdn.appscode.com/images/icon/stash.png +sources: + - https://github.com/appscode/stash +maintainers: + - name: appscode + email: support@appscode.com diff --git a/stable/stash/README.md b/stable/stash/README.md new file mode 100644 index 000000000000..e43f7bf0723a --- /dev/null +++ b/stable/stash/README.md @@ -0,0 +1,98 @@ +# Stash +[Stash by AppsCode](https://github.com/appscode/stash) - Backup your Kubernetes Volumes +## TL;DR; + +```bash +$ helm install stable/stash +``` + +## Introduction + +This chart bootstraps a [Stash controller](https://github.com/appscode/stash) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
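For orientation, a minimal custom values file for this chart could look like the sketch below; the field names follow the chart's values.yaml, and the specific tag and RBAC settings are only illustrative.

```yaml
# custom-values.yaml -- illustrative overrides for stable/stash (not the chart defaults)
replicaCount: 1
operator:
  image: appscode/stash
  tag: 0.3.1            # pin the operator image tag you intend to run
  pullPolicy: IfNotPresent
pushgateway:
  image: prom/pushgateway
  tag: v0.4.0
rbac:
  install: true         # also render the ServiceAccount/ClusterRole/ClusterRoleBinding templates
  apiVersion: v1beta1
```

Such a file can be passed with `helm install stable/stash --name my-release --values custom-values.yaml`, as described in the configuration section below.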
+ +## Prerequisites + +- Kubernetes 1.5+ + +## Installing the Chart +To install the chart with the release name `my-release`: +```bash +$ helm install stable/stash --name my-release +``` +The command deploys the Stash operator on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release`: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the Stash chart and their default values. + + +| Parameter | Description | Default | | ------------------------ | ----------------------------------------------------------------- | ------------------ | | `replicaCount` | Number of stash operator replicas to create (only 1 is supported) | `1` | | `operator.image` | operator container image | `appscode/stash` | | `operator.tag` | operator container image tag | `0.3.1` | | `operator.pullPolicy` | operator container image pull policy | `IfNotPresent` | | `pushgateway.image` | Prometheus pushgateway container image | `prom/pushgateway` | | `pushgateway.tag` | Prometheus pushgateway container image tag | `v0.4.0` | | `pushgateway.pullPolicy` | Prometheus pushgateway container image pull policy | `IfNotPresent` | | `rbac.install` | install required RBAC service account, roles and rolebindings | `false` | | `rbac.apiVersion` | RBAC API version v1alpha1\|v1beta1 | `v1beta1` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```bash +$ helm install --name my-release --set image.tag=v0.2.1 stable/stash +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```bash +$ helm install --name my-release --values values.yaml stable/stash +``` + +## RBAC +By default the chart will not install the recommended RBAC roles and rolebindings. + +You need to have the following flag set on the API server. See the following document for how to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) + +``` +--authorization-mode=RBAC +``` + +To determine if your cluster supports RBAC, run the following command: + +```console +$ kubectl api-versions | grep rbac +``` + +If the output contains "alpha" and/or "beta", you may install the chart with RBAC enabled (see below). + +### Enable RBAC role/rolebinding creation + +To enable the creation of RBAC resources (on clusters with RBAC), do the following: + +```console +$ helm install --name my-release stable/stash --set rbac.install=true +``` + +### Changing RBAC manifest apiVersion + +By default the RBAC resources are generated with the "v1beta1" apiVersion. To use "v1alpha1", do the following: + +```console +$ helm install --name my-release stable/stash --set rbac.install=true,rbac.apiVersion=v1alpha1 +``` diff --git a/stable/stash/templates/NOTES.txt b/stable/stash/templates/NOTES.txt new file mode 100644 index 000000000000..521dac2567ad --- /dev/null +++ b/stable/stash/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that Stash has started, run: + + kubectl --namespace={{ .Release.Namespace }} get deployments -l "release={{ .Release.Name }}, app={{ template "name" . 
}}" diff --git a/stable/stash/templates/_helpers.tpl b/stable/stash/templates/_helpers.tpl new file mode 100644 index 000000000000..f0d83d2edba6 --- /dev/null +++ b/stable/stash/templates/_helpers.tpl @@ -0,0 +1,16 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/stash/templates/cluster-role-binding.yaml b/stable/stash/templates/cluster-role-binding.yaml new file mode 100644 index 000000000000..4ba21801e0ac --- /dev/null +++ b/stable/stash/templates/cluster-role-binding.yaml @@ -0,0 +1,20 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . -}} +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +kind: ClusterRoleBinding +metadata: + name: {{ $serviceName }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +subjects: +- kind: ServiceAccount + name: {{ $serviceName }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $serviceName }} +{{ end }} diff --git a/stable/stash/templates/cluster-role.yaml b/stable/stash/templates/cluster-role.yaml new file mode 100644 index 000000000000..3523bbca8010 --- /dev/null +++ b/stable/stash/templates/cluster-role.yaml @@ -0,0 +1,52 @@ +{{ if .Values.rbac.install }} +{{- $serviceName := include "fullname" . -}} +apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }} +kind: ClusterRole +metadata: + name: {{ $serviceName }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +rules: +- apiGroups: + - extensions + resources: + - thirdpartyresources + verbs: ["get", "create"] +- apiGroups: + - stash.appscode.com + resources: + - restics + verbs: ["*"] +- apiGroups: + - extensions + resources: + - deployments + - replicasets + - daemonsets + verbs: ["list", "watch", "update"] +- apiGroups: + - apps + resources: + - deployments + verbs: ["list", "watch", "update"] +- apiGroups: [""] + resources: + - namespaces + - replicationcontrollers + verbs: ["list", "watch", "update"] +- apiGroups: [""] + resources: + - secrets + verbs: ["get"] +- apiGroups: [""] + resources: + - events + verbs: ["create"] +- apiGroups: [""] + resources: + - pods + verbs: ["list", delete"] +{{ end }} diff --git a/stable/stash/templates/deployment.yaml b/stable/stash/templates/deployment.yaml new file mode 100644 index 000000000000..b7c1cab86d3a --- /dev/null +++ b/stable/stash/templates/deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . 
}}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +spec: + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" + spec: +{{- if .Values.rbac.install }} + serviceAccountName: {{ template "fullname" . }} +{{- end }} + containers: + - args: + - run + - --v=3 + image: '{{ .Values.operator.image }}:{{ .Values.operator.tag }}' + imagePullPolicy: '{{ .Values.operator.pullPolicy }}' + name: operator + ports: + - containerPort: 56790 + name: http + protocol: TCP + terminationMessagePolicy: File + - args: + - -web.listen-address=:56789 + - -persistence.file=/var/pv/pushgateway.dat + image: '{{ .Values.pushgateway.image }}:{{ .Values.pushgateway.tag }}' + imagePullPolicy: '{{ .Values.pushgateway.pullPolicy }}' + name: pushgateway + ports: + - containerPort: 56789 + name: pushgateway + protocol: TCP + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/pv + name: data-volume + - mountPath: /tmp + name: stash-scratchdir + volumes: + - emptyDir: {} + name: data-volume + - emptyDir: {} + name: stash-scratchdir diff --git a/stable/stash/templates/service-account.yaml b/stable/stash/templates/service-account.yaml new file mode 100644 index 000000000000..600557649097 --- /dev/null +++ b/stable/stash/templates/service-account.yaml @@ -0,0 +1,11 @@ +{{ if .Values.rbac.install }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "fullname" . }} + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: "{{ template "name" . }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +{{ end }} diff --git a/stable/stash/templates/service.yaml b/stable/stash/templates/service.yaml new file mode 100644 index 000000000000..3436b1ae702e --- /dev/null +++ b/stable/stash/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app: "{{ template "name" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + ports: + - name: pushgateway + port: 56789 + protocol: TCP + targetPort: pushgateway + - name: http + port: 56790 + protocol: TCP + targetPort: http + selector: + app: "{{ template "name" . }}" + release: "{{ .Release.Name }}" diff --git a/stable/stash/values.yaml b/stable/stash/values.yaml new file mode 100644 index 000000000000..90f656dde921 --- /dev/null +++ b/stable/stash/values.yaml @@ -0,0 +1,16 @@ +# Default values for stash. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+replicaCount: 1 +operator: + image: appscode/stash + pullPolicy: IfNotPresent + tag: 0.3.1 +pushgateway: + image: prom/pushgateway + pullPolicy: IfNotPresent + tag: v0.4.0 +## Install Default RBAC roles and bindings +rbac: + install: false + apiVersion: v1beta1 diff --git a/stable/sumokube/README.md b/stable/sumokube/README.md index afff0889f0cc..aec4373d9b53 100644 --- a/stable/sumokube/README.md +++ b/stable/sumokube/README.md @@ -54,6 +54,7 @@ The following tables lists the configurable parameters of the Sumokube chart and | `resources.limits.cpu` | CPU resource limits | 256m | | `resources.requests.memory` | Memory resource requests | 128Mi | | `resources.limits.memory` | Memory resource limits | 256Mi | +| `daemonset.tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | [] | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -64,6 +65,14 @@ $ helm install --name my-release \ stable/sumokube ``` +Example of adding daemonset tolerations to run on master nodes. Requires Helm >=2.5 + +```bash +$ helm install --name my-release \ + --set sumologic.accessId=YOUR-ID-HERE,sumologic.accessKey=YOUR-KEY-HERE,sumologic.categoryName=my-source-category-name,daemonset.tolerations[0].effect=NoSchedule,daemonset.tolerations[0].key=node-role.kubernetes.io/master \ + stable/sumokube +``` + Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```bash diff --git a/stable/sumokube/templates/config.yaml b/stable/sumokube/templates/config.yaml index 8c7cab9a7ce1..e2caa2e965a0 100644 --- a/stable/sumokube/templates/config.yaml +++ b/stable/sumokube/templates/config.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: metadata: - name: "{{ template "fullname" . }}-config-{{.Release.Time.Seconds }}" + name: "{{ template "fullname" . }}-config-{{ .Release.Time.Seconds }}" labels: app: {{ template "fullname" . }} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" diff --git a/stable/sumokube/templates/daemonset.yaml b/stable/sumokube/templates/daemonset.yaml index 8b64fc4a9dea..c62b1bd7cf48 100644 --- a/stable/sumokube/templates/daemonset.yaml +++ b/stable/sumokube/templates/daemonset.yaml @@ -61,4 +61,6 @@ spec: - name: sumo-sources configMap: name: "{{ template "fullname" . }}-config-{{.Release.Time.Seconds }}" + tolerations: +{{ toYaml .Values.daemonset.tolerations | indent 8 }} {{ end }} diff --git a/stable/sumokube/templates/secrets.yaml b/stable/sumokube/templates/secrets.yaml index 2678aa3c486d..1013cde3c699 100644 --- a/stable/sumokube/templates/secrets.yaml +++ b/stable/sumokube/templates/secrets.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Secret metadata: - name: "{{ template "fullname" . }}-secrets-{{.Release.Time.Seconds }}" + name: "{{ template "fullname" . }}-secrets-{{ .Release.Time.Seconds }}" labels: app: {{ template "fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" diff --git a/stable/sumokube/values.yaml b/stable/sumokube/values.yaml index d48c3540d058..132b650e81f0 100644 --- a/stable/sumokube/values.yaml +++ b/stable/sumokube/values.yaml @@ -29,3 +29,7 @@ resources: limits: cpu: 256m memory: 256Mi + +daemonset: + tolerations: [] + diff --git a/stable/testlink/Chart.yaml b/stable/testlink/Chart.yaml index d75d17903d80..cb403e5d4a70 100644 --- a/stable/testlink/Chart.yaml +++ b/stable/testlink/Chart.yaml @@ -1,5 +1,5 @@ name: testlink -version: 0.4.10 +version: 0.4.11 appVersion: 1.9.16 description: Web-based test management system that facilitates software quality assurance. icon: https://bitnami.com/assets/stacks/testlink/img/testlink-stack-220x234.png diff --git a/stable/testlink/requirements.lock b/stable/testlink/requirements.lock index d373b662a8ee..174f6d875f2f 100644 --- a/stable/testlink/requirements.lock +++ b/stable/testlink/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:44.832602968-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:52.721835477-04:00 diff --git a/stable/testlink/requirements.yaml b/stable/testlink/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/testlink/requirements.yaml +++ b/stable/testlink/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/traefik/Chart.yaml b/stable/traefik/Chart.yaml index f2533fba13a8..b3709b04cb61 100755 --- a/stable/traefik/Chart.yaml +++ b/stable/traefik/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: traefik -version: 1.3.0 -appVersion: 1.2.1 +version: 1.7.0 +appVersion: 1.3.1 description: A Traefik based Kubernetes ingress controller with Let's Encrypt support keywords: - traefik diff --git a/stable/traefik/README.md b/stable/traefik/README.md index eb823203374d..bf70ac9c33f1 100644 --- a/stable/traefik/README.md +++ b/stable/traefik/README.md @@ -18,6 +18,7 @@ resources _cluster-wide_. ## Prerequisites - Kubernetes 1.4+ with Beta APIs enabled +- Kubernetes 1.6+ if you want to enable RBAC - You are deploying the chart to a cluster with a cloud provider capable of provisioning an external load balancer (e.g. 
AWS or GKE) - You control DNS for the domain(s) you intend to route through Traefik @@ -88,13 +89,16 @@ The following tables lists the configurable parameters of the Traefik chart and | Parameter | Description | Default | | ------------------------------- | -------------------------------------------------------------------- | ----------------------------------------- | | `image` | Traefik image name | `traefik` | -| `imageTag` | The version of the official Traefik image to use | `v1.2.1` | +| `imageTag` | The version of the official Traefik image to use | `1.3.1` | | `serviceType` | A valid Kubernetes service type | `LoadBalancer` | | `replicas` | The number of replicas to run; __NOTE:__ Full Traefik clustering with leader election is not yet supported, which can affect any configured Let's Encrypt setup; see Clustering section | `1` | | `cpuRequest` | Initial share of CPU requested per Traefik pod | `100m` | | `memoryRequest` | Initial share of memory requested per Traefik pod | `20Mi` | | `cpuLimit` | CPU limit per Traefik pod | `200m` | | `memoryLimit` | Memory limit per Traefik pod | `30Mi` | +| `rbac.enabled` | Whether to enable RBAC with a specific cluster role and binding for Traefik | `false` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | List of node taints to tolerate | `[]` | | `ssl.enabled` | Whether to enable HTTPS | `false` | | `ssl.enforced` | Whether to redirect HTTP requests to HTTPS | `false` | | `ssl.defaultCert` | Base64 encoded default certficate | A self-signed certificate | @@ -115,6 +119,9 @@ The following tables lists the configurable parameters of the Traefik chart and | `gzip.enabled` | Whether to use gzip compression | `true` | | `kubernetes.namespaces` | List of Kubernetes namespaces to watch | All namespaces | | `kubernetes.labelSelector` | Valid Kubernetes ingress label selector to watch (e.g `realm=public`)| No label filter | +| `accessLogs.enabled` | Whether to enable Traefik's access logs | `false` | +| `accessLogs.filePath` | The path to the log file. Logs to stdout if omitted | None | +| `accessLogs.format` | What format the log entries should be in. Either `common` or `json` | `common` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: diff --git a/stable/traefik/templates/configmap.yaml b/stable/traefik/templates/configmap.yaml index db25629765f5..f06989f69941 100644 --- a/stable/traefik/templates/configmap.yaml +++ b/stable/traefik/templates/configmap.yaml @@ -47,6 +47,13 @@ data: labelselector = {{ .Values.kubernetes.labelSelector | quote }} {{- end}} {{- end}} + {{- if .Values.accessLogs.enabled }} + [accessLogs] + {{- if .Values.accessLogs.filePath }} + filePath = "{{ .Values.accessLogs.filePath }}" + {{- end}} + format = "{{ .Values.accessLogs.format }}" + {{- end}} {{- if .Values.acme.enabled }} [acme] email = "{{ .Values.acme.email }}" diff --git a/stable/traefik/templates/deployment.yaml b/stable/traefik/templates/deployment.yaml index 6eff0d759325..9aaca5403b1e 100644 --- a/stable/traefik/templates/deployment.yaml +++ b/stable/traefik/templates/deployment.yaml @@ -15,22 +15,34 @@ spec: template: metadata: labels: + {{- if and (.Values.tolerations) (le .Capabilities.KubeVersion.Minor "5") }} + scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}' + {{- end }} app: {{ template "fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" spec: + {{- if .Values.rbac.enabled }} + serviceAccountName: {{ template "fullname" . }} + {{- else }} + serviceAccountName: default + {{- end }} terminationGracePeriodSeconds: 60 + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} containers: - image: {{ .Values.image }}:{{ .Values.imageTag }} name: {{ template "fullname" . }} resources: requests: - cpu: "{{.Values.cpuRequest}}" - memory: "{{.Values.memoryRequest}}" + cpu: "{{ .Values.cpuRequest }}" + memory: "{{ .Values.memoryRequest }}" limits: - cpu: "{{.Values.cpuLimit}}" - memory: "{{.Values.memoryLimit}}" + cpu: "{{ .Values.cpuLimit }}" + memory: "{{ .Values.memoryLimit }}" readinessProbe: tcpSocket: port: 80 @@ -84,3 +96,7 @@ spec: emptyDir: {} {{- end }} {{- end }} + {{- if and (.Values.tolerations) (ge .Capabilities.KubeVersion.Minor "6") }} + tolerations: +{{ toYaml .Values.tolerations | indent 6 }} + {{- end }} diff --git a/stable/traefik/templates/rbac.yaml b/stable/traefik/templates/rbac.yaml new file mode 100644 index 000000000000..6ee44655af71 --- /dev/null +++ b/stable/traefik/templates/rbac.yaml @@ -0,0 +1,43 @@ +{{- if .Values.rbac.enabled }} +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ template "fullname" . }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "fullname" . }} +rules: + - apiGroups: + - "" + resources: + - pods + - services + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/stable/traefik/values.yaml b/stable/traefik/values.yaml index 23b750d5a2dc..07038df5f13c 100644 --- a/stable/traefik/values.yaml +++ b/stable/traefik/values.yaml @@ -1,12 +1,19 @@ # Default values for Traefik image: traefik -imageTag: v1.2.1 +imageTag: 1.3.1 serviceType: LoadBalancer replicas: 1 cpuRequest: 100m memoryRequest: 20Mi cpuLimit: 100m memoryLimit: 30Mi +nodeSelector: {} + #key: value +tolerations: [] + #- key: "key" + #operator: "Equal|Exists" + #value: "value" + #effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" ssl: enabled: false enforced: false @@ -42,8 +49,15 @@ service: # key: value gzip: enabled: true +accessLogs: + enabled: false + ## Path to the access logs file. If not provided, Traefik defaults it to stdout. + # filePath: "" + format: common # choices are: common, json # Kubernetes ingress filters #kubernetes: # namespaces: # - default # labelSelector: +rbac: + enabled: false diff --git a/stable/uchiwa/templates/deployment.yaml b/stable/uchiwa/templates/deployment.yaml index 059a4a121f87..217cf3ad6215 100644 --- a/stable/uchiwa/templates/deployment.yaml +++ b/stable/uchiwa/templates/deployment.yaml @@ -1,7 +1,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: {{template "fullname" .}} + name: {{ template "fullname" . 
}} labels: heritage: {{ .Release.Service | quote }} release: {{ .Release.Name | quote }} diff --git a/stable/uchiwa/templates/svc.yaml b/stable/uchiwa/templates/svc.yaml index 2e07e795fbfb..297f9a651849 100644 --- a/stable/uchiwa/templates/svc.yaml +++ b/stable/uchiwa/templates/svc.yaml @@ -10,11 +10,11 @@ metadata: router.deis.io/routable: "true" annotations: router.deis.io/domains: {{ .Values.deis.domains | quote }} - {{end}} + {{ end }} spec: - type: {{.Values.serviceType}} + type: {{ .Values.serviceType }} ports: - port: {{ .Values.httpPort }} targetPort: {{ .Values.port }} selector: - app: {{ template "fullname" . }} \ No newline at end of file + app: {{ template "fullname" . }} diff --git a/stable/voyager/Chart.yaml b/stable/voyager/Chart.yaml index cc1e54ed4e60..eb01207ddd04 100755 --- a/stable/voyager/Chart.yaml +++ b/stable/voyager/Chart.yaml @@ -2,7 +2,8 @@ apiVersion: v1 description: Voyager provides controller for Ingress and Certificates for Kubernetes developed by AppsCode. icon: https://cdn.appscode.com/images/icon/voyager.png name: voyager -version: 0.1.0 +version: 1.0.0 +appVersion: 3.0.0 sources: - https://github.com/appscode/voyager maintainers: diff --git a/stable/voyager/README.md b/stable/voyager/README.md index f553e50eb72e..191b801a3514 100644 --- a/stable/voyager/README.md +++ b/stable/voyager/README.md @@ -39,9 +39,12 @@ The command removes all the Kubernetes components associated with the chart and The following tables lists the configurable parameters of the Voyager chart and their default values. -| Parameter | Description | Default | -| ----------------------- | ---------------------------- | -------------------- | -| `image` | Container image to run | `appscode/voyager` | -| `imageTag` | Image tag of container | `1.5.6` | -| `cloudProvider` | Name of cloud provider | `` | -| `logLevel` | Log level for voyager | `3` | +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | -------------------- | +| `image` | Container image to run | `appscode/voyager` | +| `imageTag` | Image tag of container | `3.0.0` | +| `cloudProvider` | Name of cloud provider | `` | +| `cloudConfig` | Path to cloud config | `` | +| `logLevel` | Log level for operator | `3` | +| `persistence.enabled` | Enable mounting cloud config | `false` | +| `persistence.hostPath` | Host mount path for cloud config | `/etc/kubernetes` | diff --git a/stable/voyager/templates/deployment.yaml b/stable/voyager/templates/deployment.yaml index 19b9ccb3c677..e673d419af57 100644 --- a/stable/voyager/templates/deployment.yaml +++ b/stable/voyager/templates/deployment.yaml @@ -23,10 +23,26 @@ spec: - args: - run - --cloud-provider={{ .Values.cloudProvider }} + - --cloud-config={{ .Values.cloudConfig }} - --v={{ .Values.logLevel }} image: "{{ .Values.image }}:{{ .Values.imageTag }}" name: voyager + env: + - name: OPERATOR_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName ports: - - containerPort: 8080 + - containerPort: 56790 name: http protocol: TCP +{{- if .Values.persistence.enabled }} + volumeMounts: + - mountPath: {{ dir .Values.cloudConfig | quote }} + name: cloudconfig + readOnly: true + volumes: + - hostPath: + path: {{ .Values.persistence.hostPath | quote }} + name: cloudconfig +{{- end -}} diff --git a/stable/voyager/templates/svc.yaml b/stable/voyager/templates/svc.yaml index 1393a6e06d41..deae3a4cb4c4 100644 --- a/stable/voyager/templates/svc.yaml +++ b/stable/voyager/templates/svc.yaml @@ -10,7 +10,7 @@ metadata: spec: ports: 
- name: http - port: 8080 + port: 56790 targetPort: http selector: app: {{ template "name" . }} diff --git a/stable/voyager/values.yaml b/stable/voyager/values.yaml index 1e66d234899c..06a2f2dee859 100644 --- a/stable/voyager/values.yaml +++ b/stable/voyager/values.yaml @@ -2,8 +2,14 @@ ## Voyager chart configuration ## image: appscode/voyager -imageTag: 1.5.6 +imageTag: 3.0.0 ## Use cloud provider here. Read details https://github.com/appscode/voyager/blob/master/docs/user-guide/README.md cloudProvider: cloud_provider +## The path to the cloud provider configuration file. Empty string for no configuration file. +## ie. for azure use /etc/kubernetes/azure.json +# cloudConfig: /etc/kubernetes/azure.json ## Log level for voyager logLevel: 3 +persistence: + enabled: false + hostPath: /etc/kubernetes diff --git a/stable/weave-cloud/templates/NOTES.txt b/stable/weave-cloud/templates/NOTES.txt index 441d22e6d743..af16ccb999e9 100644 --- a/stable/weave-cloud/templates/NOTES.txt +++ b/stable/weave-cloud/templates/NOTES.txt @@ -25,4 +25,4 @@ Then run: helm upgrade {{ .Release.Name }} --set ServiceToken= stable/weave-cloud -{{- end}} +{{- end }} diff --git a/stable/weave-cloud/templates/cortex.yaml b/stable/weave-cloud/templates/cortex.yaml index 82151761ed18..e05ce48012ee 100644 --- a/stable/weave-cloud/templates/cortex.yaml +++ b/stable/weave-cloud/templates/cortex.yaml @@ -149,7 +149,7 @@ items: remote_write: - url: 'https://cloud.weave.works/api/prom/push' basic_auth: - password: '{{.Values.ServiceToken}}' + password: '{{ .Values.ServiceToken }}' scrape_configs: - job_name: kubernetes-service-endpoints kubernetes_sd_configs: diff --git a/stable/weave-cloud/templates/flux.yaml b/stable/weave-cloud/templates/flux.yaml index 65f8a7daf8cd..606e67636ab7 100644 --- a/stable/weave-cloud/templates/flux.yaml +++ b/stable/weave-cloud/templates/flux.yaml @@ -60,7 +60,7 @@ items: containers: - name: agent args: - - '--token={{.Values.ServiceToken}}' + - '--token={{ .Values.ServiceToken }}' image: 'quay.io/weaveworks/fluxd:0.2.0' imagePullPolicy: IfNotPresent serviceAccountName: weave-flux diff --git a/stable/weave-cloud/templates/scope.yaml b/stable/weave-cloud/templates/scope.yaml index 06fa7e7539c6..783066074775 100644 --- a/stable/weave-cloud/templates/scope.yaml +++ b/stable/weave-cloud/templates/scope.yaml @@ -63,7 +63,7 @@ items: - '--probe.docker.bridge=docker0' - '--probe.docker=true' - '--probe.kubernetes=true' - - '--service-token={{.Values.ServiceToken}}' + - '--service-token={{ .Values.ServiceToken }}' image: 'weaveworks/scope:1.4.0' imagePullPolicy: IfNotPresent securityContext: diff --git a/stable/wordpress/Chart.yaml b/stable/wordpress/Chart.yaml index 36e3c18684f5..d699acc9ee89 100644 --- a/stable/wordpress/Chart.yaml +++ b/stable/wordpress/Chart.yaml @@ -1,5 +1,5 @@ name: wordpress -version: 0.6.5 +version: 0.6.7 appVersion: 4.8 description: Web publishing platform for building blogs and websites. 
icon: https://bitnami.com/assets/stacks/wordpress/img/wordpress-stack-220x234.png diff --git a/stable/wordpress/requirements.lock b/stable/wordpress/requirements.lock index d69a069f14bb..30d098ceee73 100644 --- a/stable/wordpress/requirements.lock +++ b/stable/wordpress/requirements.lock @@ -5,6 +5,6 @@ dependencies: name: mariadb repository: https://kubernetes-charts.storage.googleapis.com/ tags: null - version: 0.6.2 -digest: sha256:66acb700f673b56045b00d0b65e3ab750f12941005e7631d69bd4101f51424ab -generated: 2017-05-18T13:23:45.146992314-04:00 + version: 0.6.3 +digest: sha256:99135a083bee8717224e9f5a4e151b2831bc8367b1b88075b6405dd190ac7a11 +generated: 2017-06-22T19:36:53.184216647-04:00 diff --git a/stable/wordpress/requirements.yaml b/stable/wordpress/requirements.yaml index e12e2163630d..d376de89806e 100644 --- a/stable/wordpress/requirements.yaml +++ b/stable/wordpress/requirements.yaml @@ -1,4 +1,4 @@ dependencies: - name: mariadb - version: 0.6.2 + version: 0.6.3 repository: https://kubernetes-charts.storage.googleapis.com/ diff --git a/stable/wordpress/templates/tests/test-mariadb-connection.yaml b/stable/wordpress/templates/tests/test-mariadb-connection.yaml new file mode 100644 index 000000000000..b25414943bad --- /dev/null +++ b/stable/wordpress/templates/tests/test-mariadb-connection.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ .Release.Name }}-credentials-test" + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: {{ .Release.Name }}-credentials-test + image: {{ .Values.image }} + env: + - name: MARIADB_HOST + value: {{ template "mariadb.fullname" . }} + - name: MARIADB_PORT + value: "3306" + - name: WORDPRESS_DATABASE_NAME + value: {{ default "" .Values.mariadb.mariadbDatabase | quote }} + - name: WORDPRESS_DATABASE_USER + value: {{ default "" .Values.mariadb.mariadbUser | quote }} + - name: WORDPRESS_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mariadb.fullname" . }} + key: mariadb-password + command: ["sh", "-c", "mysql --host=$MARIADB_HOST --port=$MARIADB_PORT --user=$WORDPRESS_DATABASE_USER --password=$WORDPRESS_DATABASE_PASSWORD"] + restartPolicy: Never + diff --git a/test/helm-test-e2e.sh b/test/helm-test-e2e.sh index 5e3a47ad1a5e..4973497f366e 100755 --- a/test/helm-test-e2e.sh +++ b/test/helm-test-e2e.sh @@ -20,6 +20,9 @@ kubectl -n kube-system create sa tiller kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller linux-amd64/helm init --service-account tiller --upgrade +linux-amd64/helm repo add stable https://kubernetes-charts.storage.googleapis.com/ +linux-amd64/helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ + # Run test framework pushd . 
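The wordpress chart now ships a test pod wired to Helm's `test-success` hook. As a hedged example (assuming a release named `my-release`), the hook can be exercised with the standard Helm 2 test command and its pod inspected on failure:

```bash
# Run the chart's test hooks, e.g. the MariaDB credentials test added above
helm test my-release

# If the test fails, the pod name follows the "<release>-credentials-test" pattern
kubectl logs my-release-credentials-test
```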
cd $GOPATH diff --git a/test/helm-test/main.go b/test/helm-test/main.go index 2dc92cb6eb90..612f2a0bed56 100644 --- a/test/helm-test/main.go +++ b/test/helm-test/main.go @@ -232,6 +232,14 @@ func doMain() int { return execErr }) + xmlWrap(fmt.Sprintf("Helm Dep Build %s", path.Base(chartPath)), func() error { + o, execErr := output(exec.Command(helmPath, "dep", "build", chartPath)) + if execErr != nil { + return fmt.Errorf("%s Command output: %s", execErr, string(o[:])) + } + return nil + }) + xmlWrap(fmt.Sprintf("Helm Install %s", path.Base(chartPath)), func() error { o, execErr := output(exec.Command(helmPath, "install", chartPath, "--namespace", ns, "--name", rel, "--wait")) if execErr != nil { diff --git a/test/repo-sync.sh b/test/repo-sync.sh index 32810203472b..beab2288d434 100644 --- a/test/repo-sync.sh +++ b/test/repo-sync.sh @@ -15,7 +15,7 @@ # Setup Helm HELM_URL=https://storage.googleapis.com/kubernetes-helm -HELM_TARBALL=helm-v2.1.3-linux-amd64.tar.gz +HELM_TARBALL=helm-v2.4.2-linux-amd64.tar.gz STABLE_REPO_URL=https://kubernetes-charts.storage.googleapis.com/ INCUBATOR_REPO_URL=https://kubernetes-charts-incubator.storage.googleapis.com/ wget -q ${HELM_URL}/${HELM_TARBALL}
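For reference, the chart-testing changes above correspond roughly to the following CLI steps; the chart path, namespace, and release name here are only placeholders.

```bash
# Make dependency charts (e.g. mariadb for wordpress/testlink) resolvable
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/

# Fetch the dependencies declared in requirements.yaml, then install with --wait
helm dep build stable/wordpress
helm install stable/wordpress --namespace test-ns --name test-release --wait
```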