From a3c700b4e107361b1ac449d012a0044ab48eeb89 Mon Sep 17 00:00:00 2001 From: Tuna Date: Tue, 23 Jan 2018 23:45:15 +0100 Subject: [PATCH 1/2] mark kafka installation as optional update docs update Makefile, travis --- .travis.yml | 1 + Makefile | 4 +- README.md | 43 +++++--- docs/GKE-deployment.md | 5 + kafkazk.jsonnet | 196 +++++++++++++++++++++++++++++++++++++ kubeless-openshift.jsonnet | 2 - kubeless.jsonnet | 185 ---------------------------------- 7 files changed, 234 insertions(+), 202 deletions(-) create mode 100644 kafkazk.jsonnet diff --git a/.travis.yml b/.travis.yml index f772af9b4..0cdf6f8cd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -162,6 +162,7 @@ deploy: - kubeless-${TRAVIS_TAG}.yaml - kubeless-rbac-${TRAVIS_TAG}.yaml - kubeless-openshift-${TRAVIS_TAG}.yaml + - kafkazk-${TRAVIS_TAG}.yaml - bundles/kubeless_*.zip skip_cleanup: true overwrite: true diff --git a/Makefile b/Makefile index 4d4c13df0..1426e12a5 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ binary-cross: $(KUBECFG) show -o yaml $< > $@.tmp mv $@.tmp $@ -all-yaml: kubeless.yaml kubeless-rbac.yaml kubeless-openshift.yaml +all-yaml: kubeless.yaml kubeless-rbac.yaml kubeless-openshift.yaml kafkazk.yaml kubeless.yaml: kubeless.jsonnet @@ -45,6 +45,8 @@ kubeless-rbac.yaml: kubeless-rbac.jsonnet kubeless.jsonnet kubeless-openshift.yaml: kubeless-openshift.jsonnet kubeless-rbac.jsonnet +kafkazk.yaml: kafkazk.jsonnet + docker/controller: controller-build cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-controller $@ diff --git a/README.md b/README.md index bee1a3e59..deb1fe235 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ `kubeless` is a Kubernetes-native serverless framework that lets you deploy small bits of code without having to worry about the underlying infrastructure plumbing. It leverages Kubernetes resources to provide auto-scaling, API routing, monitoring, troubleshooting and more. 
-Kubeless stands out as we use a [Custom Resource Definition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to be able to create functions as custom kubernetes resources. We then run an in-cluster controller that watches these custom resources and launches _runtimes_ on-demand. The controller dynamically injects the functions code into the runtimes and make them available over HTTP or via a PubSub mechanism. +Kubeless stands out as we use a [Custom Resource Definition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to be able to create functions as custom kubernetes resources. We then run an in-cluster controller that watches these custom resources and launches _runtimes_ on-demand. The controller dynamically injects the functions code into the runtimes and make them available over HTTP or via a PubSub mechanism (optinal). Kubeless is purely open-source and non-affiliated to any commercial organization. Chime in at anytime, we would love the help and feedback ! @@ -30,13 +30,17 @@ Installation is made of three steps: * Download the `kubeless` CLI from the [release page](https://github.com/kubeless/kubeless/releases). (OSX users can also use [brew](https://brew.sh/): `brew install kubeless`). * Create a `kubeless` namespace (used by default) -* Then use one of the YAML manifests found in the release page to deploy kubeless. It will create a _functions_ Custom Resource Definition and launch a controller. You will see a _kubeless_ controller, a _kafka_ and a _zookeeper_ statefulset appear and shortly get in running state. +* Then use one of the YAML manifests found in the release page to deploy kubeless. It will create a _functions_ Custom Resource Definition and launch a controller. 
There are several kubeless manifests being shipped for multiple k8s environments (non-rbac, rbac and openshift), pick the one that corresponds to your environment: -* [`kubeless-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/v0.2.4/kubeless-v0.2.4.yaml) is used for non-RBAC Kubernetes cluster. -* [`kubeless-rbac-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/v0.2.4/kubeless-rbac-v0.2.4.yaml) is used for RBAC-enabled Kubernetes cluster. -* [`kubeless-openshift-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/v0.2.4/kubeless-openshift-v0.2.4.yaml) is used to deploy Kubeless to OpenShift (1.5+). +* [`kubeless-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml) is used for non-RBAC Kubernetes cluster. +* [`kubeless-rbac-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-rbac-$RELEASE.yaml) is used for RBAC-enabled Kubernetes cluster. +* [`kubeless-openshift-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-openshift-$RELEASE.yaml) is used to deploy Kubeless to OpenShift (1.5+). + +We also provide an optional `kafka-zookeeper` statefulset manifest to give you a handy option to try out the PubSub mechanism. + +* [`kafkazk-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kafkazk-$RELEASE.yaml) For example, this below is a show case of deploying kubeless to a non-RBAC Kubernetes cluster. 
@@ -47,19 +51,12 @@ $ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$RELE $ kubectl get pods -n kubeless NAME READY STATUS RESTARTS AGE -kafka-0 1/1 Running 0 1m kubeless-controller-3331951411-d60km 1/1 Running 0 1m -zoo-0 1/1 Running 0 1m $ kubectl get deployment -n kubeless NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE kubeless-controller 1 1 1 1 1m -$ kubectl get statefulset -n kubeless -NAME DESIRED CURRENT AGE -kafka 1 1 1m -zoo 1 1 1m - $ kubectl get customresourcedefinition NAME KIND functions.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io @@ -83,7 +80,7 @@ You are now ready to create functions. You can use the CLI to create a function. Functions have three possible types: * http triggered (function will expose an HTTP endpoint) -* pubsub triggered (function will consume event on a specific topic) +* pubsub triggered (function will consume event on a specific topic; kafka/zookeeper statefulsets are required) * schedule triggered (function will be called on a cron schedule) ### HTTP function @@ -162,7 +159,25 @@ Kubeless also supports [ingress](https://kubernetes.io/docs/concepts/services-ne ### PubSub function -A function can be as simple as: +NOTE: Set of Kafka-Zookeeper are required to deploy pubsub function at this moment. Please consider to use our provided manifest to deploy kafka and zookeeper statefulsets, as Kubeless hasn't been configured to integrate with existing kafka/zookeeper system. Kafka statefulset uses a PVC (persistent volume claim). Depending on the configuration of your cluster you may need to provision a PV (Persistent Volume) that matches the PVC or configure dynamic storage provisioning. Otherwise Kafka pod will fail to get scheduled. Please refer to [PV](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) documentation on how to provision storage for PVC. 
+ +Once deployed, you can verify two statefulsets up and running: + +```console +$ kubectl -n kubeless get statefulset +NAME DESIRED CURRENT AGE +kafka 1 1 40s +zoo 1 1 42s + +$ kubectl -n kubeless get svc +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +broker ClusterIP None 9092/TCP 1m +kafka ClusterIP 10.55.250.89 9092/TCP 1m +zoo ClusterIP None 9092/TCP,3888/TCP 1m +zookeeper ClusterIP 10.55.249.102 2181/TCP 1m +``` + +Now you can deploy a pubsub function. A function can be as simple as: ```python def foobar(context): diff --git a/docs/GKE-deployment.md b/docs/GKE-deployment.md index 1b85319ab..98dfe8491 100644 --- a/docs/GKE-deployment.md +++ b/docs/GKE-deployment.md @@ -73,6 +73,11 @@ export KUBELESS_VERSION= kubectl create namespace kubeless kubectl create -f https://github.com/kubeless/kubeless/releases/download/v$KUBELESS_VERSION/kubeless-rbac-v$KUBELESS_VERSION.yaml ``` +Optionally, if you want to go with PubSub function, please also deploy our provided Kafka/Zookeeper system: + +``` +kubectl create -f https://github.com/kubeless/kubeless/releases/download/v$KUBELESS_VERSION/kafkazk-v$KUBELESS_VERSION.yaml +``` ## Kubeless on GKE 1.8.x diff --git a/kafkazk.jsonnet b/kafkazk.jsonnet new file mode 100644 index 000000000..5250cf404 --- /dev/null +++ b/kafkazk.jsonnet @@ -0,0 +1,196 @@ +local k = import "ksonnet.beta.1/k.libsonnet"; + +local statefulset = k.apps.v1beta1.statefulSet; +local container = k.core.v1.container; +local service = k.core.v1.service; + +local namespace = "kubeless"; +local controller_account_name = "controller-acct"; + +local kafkaEnv = [ + { + name: "KAFKA_ADVERTISED_HOST_NAME", + value: "broker.kubeless" + }, + { + name: "KAFKA_ADVERTISED_PORT", + value: "9092" + }, + { + name: "KAFKA_PORT", + value: "9092" + }, + { + name: "KAFKA_DELETE_TOPIC_ENABLE", + value: "true" + }, + { + name: "KAFKA_ZOOKEEPER_CONNECT", + value: "zookeeper.kubeless:2181" + }, + { + name: "ALLOW_PLAINTEXT_LISTENER", + value: "yes" + } +]; + +local zookeeperEnv = 
[ + { + name: "ZOO_SERVERS", + value: "server.1=zoo-0.zoo:2888:3888:participant" + }, + { + name: "ALLOW_ANONYMOUS_LOGIN", + value: "yes" + } +]; + +local zookeeperPorts = [ + { + containerPort: 2181, + name: "client" + }, + { + containerPort: 2888, + name: "peer" + }, + { + containerPort: 3888, + name: "leader-election" + } +]; + +local kafkaContainer = + container.default("broker", "bitnami/kafka@sha256:0c4be25cd3b31176a4c738da64d988d614b939021bedf7e1b0cc72b37a071ecb") + + container.imagePullPolicy("IfNotPresent") + + container.env(kafkaEnv) + + container.ports({containerPort: 9092}) + + container.livenessProbe({tcpSocket: {port: 9092}, initialDelaySeconds: 30}) + + container.volumeMounts([ + { + name: "datadir", + mountPath: "/bitnami/kafka/data" + } + ]); + +local kafkaInitContainer = + container.default("volume-permissions", "busybox") + + container.imagePullPolicy("IfNotPresent") + + container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) + + container.volumeMounts([ + { + name: "datadir", + mountPath: "/bitnami/kafka/data" + } + ]); + +local zookeeperContainer = + container.default("zookeeper", "bitnami/zookeeper@sha256:f66625a8a25070bee18fddf42319ec58f0c49c376b19a5eb252e6a4814f07123") + + container.imagePullPolicy("IfNotPresent") + + container.env(zookeeperEnv) + + container.ports(zookeeperPorts) + + container.volumeMounts([ + { + name: "zookeeper", + mountPath: "/bitnami/zookeeper" + } + ]); + +local zookeeperInitContainer = + container.default("volume-permissions", "busybox") + + container.imagePullPolicy("IfNotPresent") + + container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) + + container.volumeMounts([ + { + name: "zookeeper", + mountPath: "/bitnami/zookeeper" + } + ]); + +local kafkaLabel = {kubeless: "kafka"}; +local zookeeperLabel = {kubeless: "zookeeper"}; + +local kafkaVolumeCT = [ + { + "metadata": { + "name": "datadir" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "1Gi" + } + } 
+ } + } +]; + +local zooVolumeCT = [ + { + "metadata": { + "name": "zookeeper" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "1Gi" + } + } + } + } +]; + +local kafkaSts = + statefulset.default("kafka", namespace) + + statefulset.spec({serviceName: "broker"}) + + {spec+: {template: {metadata: {labels: kafkaLabel}}}} + + {spec+: {volumeClaimTemplates: kafkaVolumeCT}} + + {spec+: {template+: {spec: {containers: [kafkaContainer], initContainers: [kafkaInitContainer]}}}}; + +local zookeeperSts = + statefulset.default("zoo", namespace) + + statefulset.spec({serviceName: "zoo"}) + + {spec+: {template: {metadata: {labels: zookeeperLabel}}}} + + {spec+: {volumeClaimTemplates: zooVolumeCT}} + + {spec+: {template+: {spec: {containers: [zookeeperContainer], initContainers: [zookeeperInitContainer]}}}}; + +local kafkaSvc = + service.default("kafka", namespace) + + service.spec(k.core.v1.serviceSpec.default()) + + service.mixin.spec.ports({port: 9092}) + + service.mixin.spec.selector({kubeless: "kafka"}); + +local kafkaHeadlessSvc = + service.default("broker", namespace) + + service.spec(k.core.v1.serviceSpec.default()) + + service.mixin.spec.ports({port: 9092}) + + service.mixin.spec.selector({kubeless: "kafka"}) + + {spec+: {clusterIP: "None"}}; + +local zookeeperSvc = + service.default("zookeeper", namespace) + + service.spec(k.core.v1.serviceSpec.default()) + + service.mixin.spec.ports({port: 2181, name: "client"}) + + service.mixin.spec.selector({kubeless: "zookeeper"}); + +local zookeeperHeadlessSvc = + service.default("zoo", namespace) + + service.spec(k.core.v1.serviceSpec.default()) + + service.mixin.spec.ports([{port: 9092, name: "peer"},{port: 3888, name: "leader-election"}]) + + service.mixin.spec.selector({kubeless: "zookeeper"}) + + {spec+: {clusterIP: "None"}}; + +{ + kafkaSts: k.util.prune(kafkaSts), + zookeeperSts: k.util.prune(zookeeperSts), + kafkaSvc: k.util.prune(kafkaSvc), + kafkaHeadlessSvc: 
k.util.prune(kafkaHeadlessSvc), + zookeeperSvc: k.util.prune(zookeeperSvc), + zookeeperHeadlessSvc: k.util.prune(zookeeperHeadlessSvc), +} diff --git a/kubeless-openshift.jsonnet b/kubeless-openshift.jsonnet index eaa8e54f5..d80a71ad6 100644 --- a/kubeless-openshift.jsonnet +++ b/kubeless-openshift.jsonnet @@ -7,6 +7,4 @@ kubeless + { controller: kubeless.controller + { apiVersion: "extensions/v1beta1" }, controllerClusterRole: kubeless.controllerClusterRole + { apiVersion: "v1" }, controllerClusterRoleBinding: kubeless.controllerClusterRoleBinding + { apiVersion: "v1" }, - kafkaSts: kubeless.kafkaSts + {spec+: {template+: {spec+: { initContainers: [] }}}}, - zookeeperSts: kubeless.zookeeperSts + {spec+: {template+: {spec+: { initContainers: [] }}}} } diff --git a/kubeless.jsonnet b/kubeless.jsonnet index 51a81f2b0..32c78082a 100644 --- a/kubeless.jsonnet +++ b/kubeless.jsonnet @@ -2,7 +2,6 @@ local k = import "ksonnet.beta.1/k.libsonnet"; local objectMeta = k.core.v1.objectMeta; local deployment = k.apps.v1beta1.deployment; -local statefulset = k.apps.v1beta1.statefulSet; local container = k.core.v1.container; local service = k.core.v1.service; local serviceAccount = k.core.v1.serviceAccount; @@ -35,109 +34,7 @@ local controllerContainer = container.imagePullPolicy("IfNotPresent") + container.env(controllerEnv); -local kafkaEnv = [ - { - name: "KAFKA_ADVERTISED_HOST_NAME", - value: "broker.kubeless" - }, - { - name: "KAFKA_ADVERTISED_PORT", - value: "9092" - }, - { - name: "KAFKA_PORT", - value: "9092" - }, - { - name: "KAFKA_DELETE_TOPIC_ENABLE", - value: "true" - }, - { - name: "KAFKA_ZOOKEEPER_CONNECT", - value: "zookeeper.kubeless:2181" - }, - { - name: "ALLOW_PLAINTEXT_LISTENER", - value: "yes" - } -]; - -local zookeeperEnv = [ - { - name: "ZOO_SERVERS", - value: "server.1=zoo-0.zoo:2888:3888:participant" - }, - { - name: "ALLOW_ANONYMOUS_LOGIN", - value: "yes" - } -]; - -local zookeeperPorts = [ - { - containerPort: 2181, - name: "client" - }, - { - 
containerPort: 2888, - name: "peer" - }, - { - containerPort: 3888, - name: "leader-election" - } -]; - -local kafkaContainer = - container.default("broker", "bitnami/kafka@sha256:0c4be25cd3b31176a4c738da64d988d614b939021bedf7e1b0cc72b37a071ecb") + - container.imagePullPolicy("IfNotPresent") + - container.env(kafkaEnv) + - container.ports({containerPort: 9092}) + - container.livenessProbe({tcpSocket: {port: 9092}, initialDelaySeconds: 30}) + - container.volumeMounts([ - { - name: "datadir", - mountPath: "/bitnami/kafka/data" - } - ]); - -local kafkaInitContainer = - container.default("volume-permissions", "busybox") + - container.imagePullPolicy("IfNotPresent") + - container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) + - container.volumeMounts([ - { - name: "datadir", - mountPath: "/bitnami/kafka/data" - } - ]); - -local zookeeperContainer = - container.default("zookeeper", "bitnami/zookeeper@sha256:f66625a8a25070bee18fddf42319ec58f0c49c376b19a5eb252e6a4814f07123") + - container.imagePullPolicy("IfNotPresent") + - container.env(zookeeperEnv) + - container.ports(zookeeperPorts) + - container.volumeMounts([ - { - name: "zookeeper", - mountPath: "/bitnami/zookeeper" - } - ]); - -local zookeeperInitContainer = - container.default("volume-permissions", "busybox") + - container.imagePullPolicy("IfNotPresent") + - container.command(["sh", "-c", "chmod -R g+rwX /bitnami"]) + - container.volumeMounts([ - { - name: "zookeeper", - mountPath: "/bitnami/zookeeper" - } - ]); - local kubelessLabel = {kubeless: "controller"}; -local kafkaLabel = {kubeless: "kafka"}; -local zookeeperLabel = {kubeless: "zookeeper"}; local controllerAccount = serviceAccount.default(controller_account_name, namespace); @@ -149,82 +46,6 @@ local controllerDeployment = {spec+: {template+: {spec+: {serviceAccountName: controllerAccount.metadata.name}}}} + {spec+: {template+: {metadata: {labels: kubelessLabel}}}}; -local kafkaVolumeCT = [ - { - "metadata": { - "name": "datadir" - }, - "spec": { - 
"accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "1Gi" - } - } - } - } -]; - -local zooVolumeCT = [ - { - "metadata": { - "name": "zookeeper" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "1Gi" - } - } - } - } -]; - -local kafkaSts = - statefulset.default("kafka", namespace) + - statefulset.spec({serviceName: "broker"}) + - {spec+: {template: {metadata: {labels: kafkaLabel}}}} + - {spec+: {volumeClaimTemplates: kafkaVolumeCT}} + - {spec+: {template+: {spec: {containers: [kafkaContainer], initContainers: [kafkaInitContainer]}}}}; - -local zookeeperSts = - statefulset.default("zoo", namespace) + - statefulset.spec({serviceName: "zoo"}) + - {spec+: {template: {metadata: {labels: zookeeperLabel}}}} + - {spec+: {volumeClaimTemplates: zooVolumeCT}} + - {spec+: {template+: {spec: {containers: [zookeeperContainer], initContainers: [zookeeperInitContainer]}}}}; - -local kafkaSvc = - service.default("kafka", namespace) + - service.spec(k.core.v1.serviceSpec.default()) + - service.mixin.spec.ports({port: 9092}) + - service.mixin.spec.selector({kubeless: "kafka"}); - -local kafkaHeadlessSvc = - service.default("broker", namespace) + - service.spec(k.core.v1.serviceSpec.default()) + - service.mixin.spec.ports({port: 9092}) + - service.mixin.spec.selector({kubeless: "kafka"}) + - {spec+: {clusterIP: "None"}}; - -local zookeeperSvc = - service.default("zookeeper", namespace) + - service.spec(k.core.v1.serviceSpec.default()) + - service.mixin.spec.ports({port: 2181, name: "client"}) + - service.mixin.spec.selector({kubeless: "zookeeper"}); - -local zookeeperHeadlessSvc = - service.default("zoo", namespace) + - service.spec(k.core.v1.serviceSpec.default()) + - service.mixin.spec.ports([{port: 9092, name: "peer"},{port: 3888, name: "leader-election"}]) + - service.mixin.spec.selector({kubeless: "zookeeper"}) + - {spec+: {clusterIP: "None"}}; - local crd = { apiVersion: 
"apiextensions.k8s.io/v1beta1", kind: "CustomResourceDefinition", @@ -324,12 +145,6 @@ local kubelessConfig = configMap.default("kubeless-config", namespace) + { controllerAccount: k.util.prune(controllerAccount), controller: k.util.prune(controllerDeployment), - kafkaSts: k.util.prune(kafkaSts), - zookeeperSts: k.util.prune(zookeeperSts), - kafkaSvc: k.util.prune(kafkaSvc), - kafkaHeadlessSvc: k.util.prune(kafkaHeadlessSvc), - zookeeperSvc: k.util.prune(zookeeperSvc), - zookeeperHeadlessSvc: k.util.prune(zookeeperHeadlessSvc), crd: k.util.prune(crd), cfg: k.util.prune(kubelessConfig), } From ee9bad1a6e60d8f8721a93a05407676b99683a0e Mon Sep 17 00:00:00 2001 From: Tuna Date: Tue, 13 Feb 2018 11:55:34 +0700 Subject: [PATCH 2/2] enable integration tests on travis rebase and update README deploy kafka in minikube_kafka test matrix rename kafkazk to kafka-zookeeper --- .travis.yml | 6 +++--- Makefile | 4 ++-- README.md | 8 ++++---- docs/GKE-deployment.md | 2 +- kafkazk.jsonnet => kafka-zookeeper.jsonnet | 0 script/libtest.bash | 8 +++++++- tests/integration-tests-kafka.bats | 1 + 7 files changed, 18 insertions(+), 11 deletions(-) rename kafkazk.jsonnet => kafka-zookeeper.jsonnet (100%) diff --git a/.travis.yml b/.travis.yml index 0cdf6f8cd..36fc75f47 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,9 +54,9 @@ before_install: # or if the build is from the "master" branch minikube_kafka) if [[ "$TRAVIS_PULL_REQUEST" != false ]]; then - pr_kafka_title=$(curl -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/$TRAVIS_REPO_SLUG/pulls/${TRAVIS_PULL_REQUEST}" | grep title | grep -i kafka || true) + pr_kafka_title=$(curl "https://api.github.com/repos/$TRAVIS_REPO_SLUG/pulls/${TRAVIS_PULL_REQUEST}" | grep title || true) fi - if [[ "$TRAVIS_PULL_REQUEST" == false || "$pr_kafka_title" != "" ]]; then + if [[ "$TRAVIS_PULL_REQUEST" == false || "$pr_kafka_title" == "" || "$pr_kafka_title" =~ ^.*(Kafka|kafka|KAFKA).*$ ]]; then export SHOULD_TEST=1 fi ;; @@ -162,7 
+162,7 @@ deploy: - kubeless-${TRAVIS_TAG}.yaml - kubeless-rbac-${TRAVIS_TAG}.yaml - kubeless-openshift-${TRAVIS_TAG}.yaml - - kafkazk-${TRAVIS_TAG}.yaml + - kafka-zookeeper-${TRAVIS_TAG}.yaml - bundles/kubeless_*.zip skip_cleanup: true overwrite: true diff --git a/Makefile b/Makefile index 1426e12a5..c2d0b47e9 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ binary-cross: $(KUBECFG) show -o yaml $< > $@.tmp mv $@.tmp $@ -all-yaml: kubeless.yaml kubeless-rbac.yaml kubeless-openshift.yaml kafkazk.yaml +all-yaml: kubeless.yaml kubeless-rbac.yaml kubeless-openshift.yaml kafka-zookeeper.yaml kubeless.yaml: kubeless.jsonnet @@ -45,7 +45,7 @@ kubeless-rbac.yaml: kubeless-rbac.jsonnet kubeless.jsonnet kubeless-openshift.yaml: kubeless-openshift.jsonnet kubeless-rbac.jsonnet -kafkazk.yaml: kafkazk.jsonnet +kafka-zookeeper.yaml: kafka-zookeeper.jsonnet docker/controller: controller-build cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-controller $@ diff --git a/README.md b/README.md index deb1fe235..47ad9affe 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ `kubeless` is a Kubernetes-native serverless framework that lets you deploy small bits of code without having to worry about the underlying infrastructure plumbing. It leverages Kubernetes resources to provide auto-scaling, API routing, monitoring, troubleshooting and more. -Kubeless stands out as we use a [Custom Resource Definition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to be able to create functions as custom kubernetes resources. We then run an in-cluster controller that watches these custom resources and launches _runtimes_ on-demand. The controller dynamically injects the functions code into the runtimes and make them available over HTTP or via a PubSub mechanism (optinal). 
+Kubeless stands out as we use a [Custom Resource Definition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to be able to create functions as custom kubernetes resources. We then run an in-cluster controller that watches these custom resources and launches _runtimes_ on-demand. The controller dynamically injects the functions code into the runtimes and make them available over HTTP or via a PubSub mechanism. Kubeless is purely open-source and non-affiliated to any commercial organization. Chime in at anytime, we would love the help and feedback ! @@ -40,7 +40,7 @@ There are several kubeless manifests being shipped for multiple k8s environments We also provide an optional `kafka-zookeeper` statefulset manifest to give you a handy option to try out the PubSub mechanism. -* [`kafkazk-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kafkazk-$RELEASE.yaml) +* [`kafka-zookeeper-$RELEASE.yaml`](https://github.com/kubeless/kubeless/releases/download/$RELEASE/kafka-zookeeper-$RELEASE.yaml) For example, this below is a show case of deploying kubeless to a non-RBAC Kubernetes cluster. @@ -159,7 +159,7 @@ Kubeless also supports [ingress](https://kubernetes.io/docs/concepts/services-ne ### PubSub function -NOTE: Set of Kafka-Zookeeper are required to deploy pubsub function at this moment. Please consider to use our provided manifest to deploy kafka and zookeeper statefulsets, as Kubeless hasn't been configured to integrate with existing kafka/zookeeper system. Kafka statefulset uses a PVC (persistent volume claim). Depending on the configuration of your cluster you may need to provision a PV (Persistent Volume) that matches the PVC or configure dynamic storage provisioning. Otherwise Kafka pod will fail to get scheduled. Please refer to [PV](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) documentation on how to provision storage for PVC. 
+We provide several [PubSub runtimes](https://hub.docker.com/r/kubeless/), which have the suffix `event-consumer`, for the supported languages; these help you to quickly deploy your function with the PubSub mechanism. The PubSub function will expect to consume input messages from a predefined Kafka topic which means Kafka is required. In the Kubeless [release page](https://github.com/kubeless/kubeless/releases), you can find the manifest to quickly deploy a collection of Kafka and Zookeeper statefulsets. Once deployed, you can verify two statefulsets up and running: @@ -169,7 +169,7 @@ NAME DESIRED CURRENT AGE kafka 1 1 40s zoo 1 1 42s -$ kubectl -n kubeless get svc +$ kubectl -n kubeless get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE broker ClusterIP None 9092/TCP 1m kafka ClusterIP 10.55.250.89 9092/TCP 1m diff --git a/docs/GKE-deployment.md b/docs/GKE-deployment.md index 98dfe8491..2537ebed9 100644 --- a/docs/GKE-deployment.md +++ b/docs/GKE-deployment.md @@ -76,7 +76,7 @@ kubectl create -f https://github.com/kubeless/kubeless/releases/download/v$KUBEL Optionally, if you want to go with PubSub function, please also deploy our provided Kafka/Zookeeper system: ``` -kubectl create -f https://github.com/kubeless/kubeless/releases/download/v$KUBELESS_VERSION/kafkazk-v$KUBELESS_VERSION.yaml +kubectl create -f https://github.com/kubeless/kubeless/releases/download/v$KUBELESS_VERSION/kafka-zookeeper-v$KUBELESS_VERSION.yaml ``` ## Kubeless on GKE 1.8.x diff --git a/kafkazk.jsonnet b/kafka-zookeeper.jsonnet similarity index 100% rename from kafkazk.jsonnet rename to kafka-zookeeper.jsonnet diff --git a/script/libtest.bash b/script/libtest.bash index 97e888923..669dc7499 100644 --- a/script/libtest.bash +++ b/script/libtest.bash @@ -16,6 +16,7 @@ KUBELESS_MANIFEST=kubeless.yaml KUBELESS_MANIFEST_RBAC=kubeless-rbac.yaml +KAFKA_MANIFEST=kafka-zookeeper.yaml KUBECTL_BIN=$(which kubectl) : ${KUBECTL_BIN:?ERROR: missing binary: kubectl} @@ -38,7 +39,7 @@ kubectl() { k8s_wait_for_pod_ready()
{ echo_info "Waiting for pod '${@}' to be ready ... " local -i cnt=${TEST_MAX_WAIT_SEC:?} - + # Retries just in case it is not stable local -i successCount=0 while [ "$successCount" -lt "3" ]; do @@ -240,6 +241,11 @@ redeploy_with_rbac_roles() { _wait_for_kubeless_controller_logline "controller synced and ready" } +deploy_kafka() { + echo_info "Deploy kafka ... " + kubectl create -f $KAFKA_MANIFEST +} + deploy_function() { local func=${1:?} func_topic echo_info "TEST: $func" diff --git a/tests/integration-tests-kafka.bats b/tests/integration-tests-kafka.bats index ca2918a3f..4099a8385 100644 --- a/tests/integration-tests-kafka.bats +++ b/tests/integration-tests-kafka.bats @@ -18,6 +18,7 @@ load ../script/libtest # 'bats' lacks loop support, unroll-them-all -> @test "Wait for kafka" { + deploy_kafka wait_for_kubeless_kafka_server_ready } @test "Test function: pubsub-python" {