Skip to content
This repository has been archived by the owner on Dec 15, 2021. It is now read-only.

decoupling Function, Trigger and Runtimes #620

Merged
merged 41 commits into from
Mar 22, 2018
Merged
Show file tree
Hide file tree
Changes from 33 commits
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
d1c51f0
CRD API objects for functions, http, kafka, cronjob triggers and
murali-reddy Mar 6, 2018
1bf648f
CRD controllers for the function, kafka triggers, cronjob triggers, http
murali-reddy Mar 7, 2018
10705cb
new server side binaries kubeless-controller-manager and kafka-contro…
murali-reddy Mar 7, 2018
989f10b
updated CLI to incorporate implicit creation of triggers with
murali-reddy Mar 7, 2018
7d4b74e
updated to build scripts
murali-reddy Mar 7, 2018
655da7f
updated to manifests
murali-reddy Mar 7, 2018
1fabea6
Updates to examples used in integration tests
murali-reddy Mar 7, 2018
416d27a
kafka event consumer with new go kafka lib dependencies
murali-reddy Mar 7, 2018
41d136a
update vendor libs
murali-reddy Mar 7, 2018
ed682eb
update docker files
murali-reddy Mar 7, 2018
20d9485
updates to utils and langruntime
murali-reddy Mar 7, 2018
004c4a9
update .travis.yaml
murali-reddy Mar 7, 2018
72afaab
delete old controller docker file
murali-reddy Mar 7, 2018
44ed20b
better kafka event consumer logging
murali-reddy Mar 7, 2018
343e5d5
fix travis ci issue related to kafka broker
murali-reddy Mar 8, 2018
bb47001
keep the old GKE version 1.7.12-gke.1
murali-reddy Mar 8, 2018
2d78977
Use bitnami-bot docker account
Mar 8, 2018
180a178
Fix http request method for the Kafka consumer
Mar 8, 2018
5d769cd
Push Kafka image
Mar 8, 2018
b70c3ab
Fix content-type in consumer HTTP message
Mar 8, 2018
53ef2e7
Fix GKE version
Mar 8, 2018
c67f31b
Removed duplicated files
Mar 8, 2018
c406af1
Update runtime docs
Mar 8, 2018
85cff13
Do proper check for topic update
Mar 8, 2018
17c035f
skip IT test with trigger update through
murali-reddy Mar 9, 2018
6d593e5
Add CPU limit argument to function commands (#606)
SomeoneWeird Feb 28, 2018
c9216f5
Merge branch 'master' into function-trigger-runtimes
murali-reddy Mar 13, 2018
6174c59
fix glide.lock and vendor libs, that got messed up with master merge
murali-reddy Mar 14, 2018
01518b7
addressing review comments
murali-reddy Mar 15, 2018
01f8175
update .gitignore with new controller images
murali-reddy Mar 15, 2018
af2808f
Review
Mar 16, 2018
c0f4ab4
Review
Mar 16, 2018
358f8cb
use 3-way merge Patch only to update the CRD objects
murali-reddy Mar 17, 2018
81cba44
nodejs, remove runtime internal data from sandbox, avoid code concat …
cscheiber Mar 19, 2018
764b8c8
Revert "use 3-way merge Patch only to update the CRD objects"
murali-reddy Mar 20, 2018
0215d4f
Avoid reflect.DeepEqual for deployment comparison
Mar 20, 2018
25d300c
Fix empty body for NodeJS functions
Mar 20, 2018
200b867
fix GKE CI failures due to upstream issue https://github.com/kubernet…
murali-reddy Mar 20, 2018
5fe1681
Fix error catching in Python runtime. Remove old files
Mar 21, 2018
cd56789
Send message as jsons if necessary in kafka-consumer
Mar 21, 2018
fd63962
Move serviceSpec to function object. Fix service modifications
Mar 21, 2018
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -145,8 +145,10 @@ artifacts/
# Kubeless specific
bats/
bundles/
docker/controller/kubeless-controller
docker/controller-manager/kubeless-controller-manager
docker/kafka-controller/kafka-controller
ksonnet-lib/
kubeless-openshift.yaml
kubeless-rbac.yaml
kubeless.yaml
kafka-zookeeper.yaml
8 changes: 6 additions & 2 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@ env:
- TEST_TARGET=minikube_kafka
- TEST_TARGET=GKE
global:
- CONTROLLER_IMAGE_NAME=bitnami/kubeless-controller
- CONTROLLER_IMAGE_NAME=bitnami/kubeless-controller-manager
- CONTROLLER_TAG=${TRAVIS_TAG:-build-$TRAVIS_BUILD_ID}
- CONTROLLER_IMAGE=${CONTROLLER_IMAGE_NAME}:${CONTROLLER_TAG}
- KAFKA_CONTROLLER_IMAGE_NAME=bitnami/kafka-trigger-controller
- KAFKA_CONTROLLER_IMAGE=${KAFKA_CONTROLLER_IMAGE_NAME}:${CONTROLLER_TAG}
- CGO_ENABLED=0
- TEST_DEBUG=1
- GKE_VERSION=1.7.12-gke.2
Expand Down Expand Up @@ -92,10 +94,12 @@ script:
if [[ "$SHOULD_TEST" == "1" ]]; then
make VERSION=${TRAVIS_TAG:-build-$TRAVIS_BUILD_ID} binary
make controller-image CONTROLLER_IMAGE=$CONTROLLER_IMAGE
make kafka-controller-image KAFKA_CONTROLLER_IMAGE=$KAFKA_CONTROLLER_IMAGE
make all-yaml
sed -i.bak 's/'":latest"'/'":${CONTROLLER_TAG}"'/g' kubeless.yaml
sed -i.bak 's/'":latest"'/'":${CONTROLLER_TAG}"'/g' kubeless-rbac.yaml
sed -i.bak 's/'":latest"'/'":${CONTROLLER_TAG}"'/g' kubeless-openshift.yaml
sed -i.bak 's/'":latest"'/'":${CONTROLLER_TAG}"'/g' kafka-zookeeper.yaml
case $TEST_TARGET in
unit)
make test
Expand All @@ -112,6 +116,7 @@ script:
GKE)
docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
docker push $CONTROLLER_IMAGE
docker push $KAFKA_CONTROLLER_IMAGE
echo "Waiting for the GKE cluster to be ready"
tail -f $TRAVIS_BUILD_DIR/gke-start.log &
wait $GKE_START_PID
Expand Down Expand Up @@ -152,7 +157,6 @@ before_deploy:
sed 's/'":${CONTROLLER_TAG}"'/'"@$NEW_DIGEST"'/g' kubeless.yaml > kubeless-${TRAVIS_TAG}.yaml
sed 's/'":${CONTROLLER_TAG}"'/'"@$NEW_DIGEST"'/g' kubeless-rbac.yaml > kubeless-rbac-${TRAVIS_TAG}.yaml
sed 's/'":${CONTROLLER_TAG}"'/'"@$NEW_DIGEST"'/g' kubeless-openshift.yaml > kubeless-openshift-${TRAVIS_TAG}.yaml
cp kafka-zookeeper.yaml kafka-zookeeper-${TRAVIS_TAG}.yaml
fi

deploy:
Expand Down
20 changes: 15 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@ VERSION = dev-$(shell date +%FT%T%z)

KUBECFG = kubecfg
DOCKER = docker
CONTROLLER_IMAGE = kubeless-controller:latest
CONTROLLER_IMAGE = kubeless-controller-manager:latest
KAFKA_CONTROLLER_IMAGE = kafka-trigger-controller:latest
OS = linux
ARCH = amd64
BUNDLES = bundles
Expand Down Expand Up @@ -47,15 +48,24 @@ kubeless-openshift.yaml: kubeless-openshift.jsonnet kubeless-rbac.jsonnet

kafka-zookeeper.yaml: kafka-zookeeper.jsonnet

docker/controller: controller-build
cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-controller $@
docker/controller-manager: controller-build
cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-controller-manager $@

controller-build:
./script/binary-controller -os=$(OS) -arch=$(ARCH)

controller-image: docker/controller
controller-image: docker/controller-manager
$(DOCKER) build -t $(CONTROLLER_IMAGE) $<

docker/kafka-controller: kafka-controller-build
cp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kafka-controller $@

kafka-controller-build:
./script/kafka-controller.sh -os=$(OS) -arch=$(ARCH)

kafka-controller-image: docker/kafka-controller
$(DOCKER) build -t $(KAFKA_CONTROLLER_IMAGE) $<

update:
./hack/update-codegen.sh

Expand Down Expand Up @@ -102,4 +112,4 @@ bootstrap: bats ksonnet-lib
fi

build_and_test:
./script/start-test-environment.sh "make binary && make controller-image CONTROLLER_IMAGE=bitnami/kubeless-controller:latest && make integration-tests"
./script/start-test-environment.sh "make binary && make controller-image CONTROLLER_IMAGE=bitnami/kubeless-controller-manager:latest && make integration-tests"
68 changes: 68 additions & 0 deletions cmd/kafka-trigger-controller/kafka-controller.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
/*
Copyright (c) 2016-2017 Bitnami

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"fmt"
"os"
"os/signal"
"syscall"

"github.com/kubeless/kubeless/pkg/controller"
"github.com/kubeless/kubeless/pkg/utils"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)

const (
	// globalUsage is the long help text shown for the root command.
	// TODO: add an explanation of what the kafka-controller does.
	globalUsage = ``
)

// rootCmd is the entry command of the standalone Kafka trigger
// controller binary. When executed it builds the trigger controller
// from the in-cluster kubeless client, starts it in a goroutine, and
// blocks until the process receives SIGTERM or SIGINT.
var rootCmd = &cobra.Command{
	Use:   "kafka-controller",
	Short: "Kafka controller",
	Long:  globalUsage,
	Run: func(cmd *cobra.Command, args []string) {
		client, err := utils.GetFunctionClientInCluster()
		if err != nil {
			logrus.Fatalf("Cannot get kubeless client: %v", err)
		}

		// Wire the controller directly from its config; no intermediate
		// variables are needed beyond the client.
		kafkaController := controller.NewKafkaTriggerController(controller.KafkaTriggerConfig{
			TriggerClient: client,
		})

		stopCh := make(chan struct{})
		defer close(stopCh)
		go kafkaController.Run(stopCh)

		// Block until a termination signal is delivered.
		shutdown := make(chan os.Signal, 1)
		signal.Notify(shutdown, syscall.SIGTERM, syscall.SIGINT)
		<-shutdown
	},
}

// main executes the root command, printing any error and exiting with
// a non-zero status on failure.
func main() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
"syscall"

monitoringv1alpha1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
"github.com/kubeless/kubeless/pkg/client/informers/externalversions"
"github.com/kubeless/kubeless/pkg/controller"
"github.com/kubeless/kubeless/pkg/utils"
"github.com/sirupsen/logrus"
Expand All @@ -47,10 +48,18 @@ var rootCmd = &cobra.Command{
logrus.Fatalf("Cannot get kubeless client: %v", err)
}

cfg := controller.Config{
functionCfg := controller.Config{
KubeCli: utils.GetClient(),
FunctionClient: kubelessClient,
}
httpTriggerCfg := controller.HTTPTriggerConfig{
KubeCli: utils.GetClient(),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I haven't looked at the stack in sufficient detail, but I suspect we should re-use the kubecli client across these, so we can reuse the HTTP client connection(s).

Ideally of course, these would all use SharedInformers so we can reuse the watch connections..

TriggerClient: kubelessClient,
}
cronJobTriggerCfg := controller.CronJobTriggerConfig{
KubeCli: utils.GetClient(),
TriggerClient: kubelessClient,
}

restCfg, err := rest.InClusterConfig()
if err != nil {
Expand All @@ -62,11 +71,18 @@ var rootCmd = &cobra.Command{
logrus.Fatal(err)
}

c := controller.New(cfg, smclient)
sharedInformerFactory := externalversions.NewSharedInformerFactory(kubelessClient, 0)

functionController := controller.NewFunctionController(functionCfg, smclient)
httpTriggerController := controller.NewHTTPTriggerController(httpTriggerCfg, sharedInformerFactory)
cronJobTriggerController := controller.NewCronJobTriggerController(cronJobTriggerCfg, sharedInformerFactory)

stopCh := make(chan struct{})
defer close(stopCh)

go c.Run(stopCh)
go functionController.Run(stopCh)
go httpTriggerController.Run(stopCh)
go cronJobTriggerController.Run(stopCh)

sigterm := make(chan os.Signal, 1)
signal.Notify(sigterm, syscall.SIGTERM)
Expand Down
19 changes: 10 additions & 9 deletions cmd/kubeless/autoscale/autoscaleCreate.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,6 @@ var autoscaleCreateCmd = &cobra.Command{
ns = utils.GetDefaultNamespace()
}

function, err := utils.GetFunction(funcName, ns)
if err != nil {
logrus.Fatalf("Unable to find the function %s. Received %s: ", funcName, err)
}

min, err := cmd.Flags().GetInt32("min")
if err != nil {
logrus.Fatal(err)
Expand All @@ -55,18 +50,24 @@ var autoscaleCreateCmd = &cobra.Command{
logrus.Fatal(err)
}

hpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, function.ObjectMeta.Labels)
kubelessClient, err := utils.GetKubelessClientOutCluster()
if err != nil {
logrus.Fatal(err)
}
function.Spec.HorizontalPodAutoscaler = hpa

kubelessClient, err := utils.GetFunctionClientOutCluster()
function, err := utils.GetFunctionCustomResource(kubelessClient, funcName, ns)
if err != nil {
logrus.Fatalf("Unable to find the function %s. Received %s: ", funcName, err)
}

hpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, function.ObjectMeta.Labels)
if err != nil {
logrus.Fatal(err)
}
function.Spec.HorizontalPodAutoscaler = hpa

logrus.Infof("Adding autoscaling rule to the function...")
err = utils.UpdateK8sCustomResource(kubelessClient, &function)
err = utils.UpdateFunctionCustomResource(kubelessClient, function)
if err != nil {
logrus.Fatal(err)
}
Expand Down
17 changes: 9 additions & 8 deletions cmd/kubeless/autoscale/autoscaleDelete.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,23 +38,24 @@ var autoscaleDeleteCmd = &cobra.Command{
ns = utils.GetDefaultNamespace()
}

function, err := utils.GetFunction(funcName, ns)
kubelessClient, err := utils.GetKubelessClientOutCluster()
if err != nil {
logrus.Fatal(err)
}

function, err := utils.GetFunctionCustomResource(kubelessClient, funcName, ns)
if err != nil {
logrus.Fatalf("Unable to find the function %s. Received %s: ", funcName, err)
}

if function.Spec.HorizontalPodAutoscaler.Name != "" {
function.Spec.HorizontalPodAutoscaler = v2beta1.HorizontalPodAutoscaler{}
kubelessClient, err := utils.GetFunctionClientOutCluster()
if err != nil {
logrus.Fatal(err)
}
logrus.Infof("Removing autoscaling rule to the function...")
err = utils.UpdateK8sCustomResource(kubelessClient, &function)
logrus.Infof("Removing autoscaling rule from the function...")
err = utils.UpdateFunctionCustomResource(kubelessClient, function)
if err != nil {
logrus.Fatal(err)
}
logrus.Infof("Removed Autoscaling rule for %s", funcName)
logrus.Infof("Removed Autoscaling rule from %s", funcName)
} else {
logrus.Fatalf("Not found an auto scale definition for %s", funcName)
}
Expand Down
10 changes: 10 additions & 0 deletions cmd/kubeless/function/call.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"fmt"
"strconv"
"strings"
"time"

"github.com/kubeless/kubeless/pkg/utils"
"github.com/sirupsen/logrus"
Expand Down Expand Up @@ -83,6 +84,15 @@ var callCmd = &cobra.Command{
// So we need to manually build the URL
req = req.AbsPath(svc.ObjectMeta.SelfLink + ":" + port + "/proxy/")
}
timestamp := time.Now().UTC()
eventID, err := utils.GetRandString(11)
if err != nil {
logrus.Fatalf("Unable to generate ID %v", err)
}
req.SetHeader("event-id", eventID)
req.SetHeader("event-type", "application/json")
req.SetHeader("event-time", timestamp.String())
req.SetHeader("event-namespace", "cli.kubeless.io")
res, err := req.Do().Raw()
if err != nil {
// Properly interpret line breaks
Expand Down
4 changes: 2 additions & 2 deletions cmd/kubeless/function/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,12 @@ var deleteCmd = &cobra.Command{
ns = utils.GetDefaultNamespace()
}

kubelessClient, err := utils.GetFunctionClientOutCluster()
kubelessClient, err := utils.GetKubelessClientOutCluster()
if err != nil {
logrus.Fatal(err)
}

err = utils.DeleteK8sCustomResource(kubelessClient, funcName, ns)
err = utils.DeleteFunctionCustomResource(kubelessClient, funcName, ns)
if err != nil {
logrus.Fatal(err)
}
Expand Down
Loading