From 5562f0799d8476c37590b5ec2961980a88696eff Mon Sep 17 00:00:00 2001
From: David Farr
Date: Thu, 23 Mar 2023 14:16:24 -0700
Subject: [PATCH] feat: Kafka EventBus (#2502)
Signed-off-by: David Farr
Signed-off-by: Prema devi Kuppuswamy
Signed-off-by: Bilal Bakht Ahmad
Co-authored-by: Prema <107519450+premadk@users.noreply.github.com>
Co-authored-by: Bilal Bakht Ahmad
Co-authored-by: Bilal Bakht Ahmad
Signed-off-by: Aalok
---
.github/workflows/ci.yaml | 3 +-
Makefile | 9 +-
api/event-bus.html | 188 ++++
api/event-bus.md | 187 +++
api/jsonschema/schema.json | 54 +
api/openapi-spec/swagger.json | 54 +
common/leaderelection/leaderelection.go | 2 +-
common/util.go | 9 +-
common/util_test.go | 4 +-
controllers/eventbus/installer/installer.go | 2 +
controllers/eventbus/installer/kafka.go | 47 +
controllers/eventbus/installer/kafka_test.go | 53 +
controllers/eventbus/validate.go | 9 +-
controllers/eventbus/validate_test.go | 25 +
controllers/eventsource/controller_test.go | 46 +
controllers/eventsource/resource.go | 100 +-
controllers/eventsource/resource_test.go | 110 +-
controllers/sensor/resource.go | 102 +-
controllers/sensor/resource_test.go | 156 ++-
docs/concepts/eventbus.md | 8 +-
docs/eventbus/eventbus.md | 4 +-
docs/eventbus/kafka.md | 52 +
docs/eventsources/ha.md | 7 +-
docs/sensors/ha.md | 8 +-
eventbus/common/interface.go | 6 +-
eventbus/driver.go | 19 +-
eventbus/driver_test.go | 9 +-
eventbus/jetstream/sensor/sensor_jetstream.go | 3 +-
eventbus/kafka/base/kafka.go | 112 ++
eventbus/kafka/base/kafka_conn.go | 13 +
eventbus/kafka/base/utils.go | 51 +
eventbus/kafka/base/utils_test.go | 82 ++
eventbus/kafka/eventsource/source_conn.go | 55 +
eventbus/kafka/eventsource/source_kafka.go | 55 +
eventbus/kafka/sensor/kafka_handler.go | 224 ++++
eventbus/kafka/sensor/kafka_sensor.go | 369 ++++++
eventbus/kafka/sensor/kafka_transaction.go | 99 ++
eventbus/kafka/sensor/trigger_conn.go | 91 ++
eventbus/kafka/sensor/trigger_handler.go | 155 +++
eventbus/stan/sensor/sensor_stan.go | 3 +-
eventsources/sources/kafka/start.go | 2 +
mkdocs.yml | 1 +
pkg/apis/common/common.go | 1 +
pkg/apis/eventbus/v1alpha1/eventbus_types.go | 5 +
pkg/apis/eventbus/v1alpha1/generated.pb.go | 1000 ++++++++++++++---
pkg/apis/eventbus/v1alpha1/generated.proto | 47 +
pkg/apis/eventbus/v1alpha1/kafka_eventbus.go | 38 +
.../eventbus/v1alpha1/openapi_generated.go | 104 +-
.../v1alpha1/zz_generated.deepcopy.go | 57 +
pkg/apis/events/event-data.go | 2 +
sensors/listener.go | 16 +-
test/e2e/fixtures/e2e_suite.go | 10 +
test/e2e/fixtures/when.go | 6 +-
test/e2e/functional_test.go | 23 +-
test/e2e/testdata/es-calendar-ha.yaml | 2 +-
test/e2e/testdata/es-calendar-metrics.yaml | 4 +-
test/e2e/testdata/es-calendar.yaml | 4 +-
test/e2e/testdata/es-durable-consumer.yaml | 2 +
test/e2e/testdata/es-multi-dep.yaml | 2 +
test/e2e/testdata/es-multi-sensor.yaml | 2 +
.../e2e/testdata/es-test-metrics-webhook.yaml | 2 +
test/e2e/testdata/es-trigger-spec-change.yaml | 2 +
test/e2e/testdata/es-webhook.yaml | 2 +
...ling.yaml => sensor-atleastonce-fail.yaml} | 1 +
...e.yaml => sensor-atleastonce-succeed.yaml} | 1 +
...iling.yaml => sensor-atmostonce-fail.yaml} | 1 +
...le.yaml => sensor-atmostonce-succeed.yaml} | 1 +
.../e2e/testdata/sensor-durable-consumer.yaml | 1 +
test/e2e/testdata/sensor-log-ha.yaml | 2 +
test/e2e/testdata/sensor-log-metrics.yaml | 2 +
test/e2e/testdata/sensor-log.yaml | 2 +
test/e2e/testdata/sensor-multi-dep.yaml | 1 +
.../sensor-multi-sensor-2-atleastonce.yaml | 1 +
test/e2e/testdata/sensor-multi-sensor-2.yaml | 1 +
.../sensor-multi-sensor-atleastonce.yaml | 1 +
test/e2e/testdata/sensor-multi-sensor.yaml | 1 +
test/e2e/testdata/sensor-resource.yaml | 2 +
test/e2e/testdata/sensor-test-metrics.yaml | 2 +
.../sensor-trigger-spec-change-2.yaml | 1 +
.../testdata/sensor-trigger-spec-change.yaml | 1 +
test/manifests/kafka/kafka.yaml | 54 +
test/manifests/kafka/kustomization.yaml | 6 +
test/manifests/kafka/zookeeper.yaml | 38 +
test/util/util.go | 2 +-
84 files changed, 3695 insertions(+), 346 deletions(-)
create mode 100644 controllers/eventbus/installer/kafka.go
create mode 100644 controllers/eventbus/installer/kafka_test.go
create mode 100644 docs/eventbus/kafka.md
create mode 100644 eventbus/kafka/base/kafka.go
create mode 100644 eventbus/kafka/base/kafka_conn.go
create mode 100644 eventbus/kafka/base/utils.go
create mode 100644 eventbus/kafka/base/utils_test.go
create mode 100644 eventbus/kafka/eventsource/source_conn.go
create mode 100644 eventbus/kafka/eventsource/source_kafka.go
create mode 100644 eventbus/kafka/sensor/kafka_handler.go
create mode 100644 eventbus/kafka/sensor/kafka_sensor.go
create mode 100644 eventbus/kafka/sensor/kafka_transaction.go
create mode 100644 eventbus/kafka/sensor/trigger_conn.go
create mode 100644 eventbus/kafka/sensor/trigger_handler.go
create mode 100644 pkg/apis/eventbus/v1alpha1/kafka_eventbus.go
rename test/e2e/testdata/{sensor-atleastonce-failing.yaml => sensor-atleastonce-fail.yaml} (93%)
rename test/e2e/testdata/{sensor-atleastonce-triggerable.yaml => sensor-atleastonce-succeed.yaml} (91%)
rename test/e2e/testdata/{sensor-atmostonce-failing.yaml => sensor-atmostonce-fail.yaml} (93%)
rename test/e2e/testdata/{sensor-atmostonce-triggerable.yaml => sensor-atmostonce-succeed.yaml} (90%)
create mode 100644 test/manifests/kafka/kafka.yaml
create mode 100644 test/manifests/kafka/kustomization.yaml
create mode 100644 test/manifests/kafka/zookeeper.yaml
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index ed693e15c1..e018985904 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -113,7 +113,7 @@ jobs:
e2e-tests:
name: E2E Tests
runs-on: ubuntu-latest
- timeout-minutes: 20
+ timeout-minutes: 25
env:
KUBECONFIG: /home/runner/.kubeconfig
strategy:
@@ -123,6 +123,7 @@ jobs:
include:
- driver: stan
- driver: jetstream
+ - driver: kafka
steps:
- name: Checkout code
uses: actions/checkout@v3
diff --git a/Makefile b/Makefile
index a5a4a9d012..4c1d08b8aa 100644
--- a/Makefile
+++ b/Makefile
@@ -87,7 +87,14 @@ test:
go test $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e/) -race -short -v
test-functional:
- go test -v -timeout 15m -count 1 --tags functional -p 1 ./test/e2e
+ifeq ($(EventBusDriver),kafka)
+ kubectl -n argo-events apply -k test/manifests/kafka
+ kubectl -n argo-events wait -l statefulset.kubernetes.io/pod-name=kafka-0 --for=condition=ready pod --timeout=60s
+endif
+ go test -v -timeout 20m -count 1 --tags functional -p 1 ./test/e2e
+ifeq ($(EventBusDriver),kafka)
+ kubectl -n argo-events delete -k test/manifests/kafka
+endif
# to run just one of the functional e2e tests by name (i.e. 'make TestMetricsWithWebhook'):
Test%:
diff --git a/api/event-bus.html b/api/event-bus.html
index d6c93e6af0..8059c1d885 100644
--- a/api/event-bus.html
+++ b/api/event-bus.html
@@ -63,6 +63,19 @@ BusConfig
(Optional)
+
+
+kafka
+
+
+KafkaBus
+
+
+ |
+
+(Optional)
+ |
+
ContainerTemplate
@@ -188,6 +201,20 @@ EventBus
(Optional)
+
+
+kafka
+
+
+KafkaBus
+
+
+ |
+
+(Optional)
+ Kafka eventbus
+ |
+
@@ -250,6 +277,20 @@ EventBusSpec
(Optional)
+
+
+kafka
+
+
+KafkaBus
+
+
+ |
+
+(Optional)
+ Kafka eventbus
+ |
+
EventBusStatus
@@ -632,6 +673,153 @@ JetStreamConfig
+KafkaBus
+
+
+(Appears on:
+BusConfig,
+EventBusSpec)
+
+
+
KafkaBus holds the KafkaBus EventBus information
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+url
+
+string
+
+ |
+
+ URL to kafka cluster, multiple URLs separated by comma
+ |
+
+
+
+topic
+
+string
+
+ |
+
+(Optional)
+ Topic name, defaults to {namespace_name}-{eventbus_name}
+ |
+
+
+
+version
+
+string
+
+ |
+
+(Optional)
+ Kafka version, sarama defaults to the oldest supported stable version
+ |
+
+
+
+tls
+
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
+
+ |
+
+(Optional)
+ TLS configuration for the kafka client.
+ |
+
+
+
+sasl
+
+github.com/argoproj/argo-events/pkg/apis/common.SASLConfig
+
+ |
+
+(Optional)
+ SASL configuration for the kafka client
+ |
+
+
+
+consumerGroup
+
+
+KafkaConsumerGroup
+
+
+ |
+
+(Optional)
+ Consumer group for kafka client
+ |
+
+
+
+KafkaConsumerGroup
+
+
+(Appears on:
+KafkaBus)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+groupName
+
+string
+
+ |
+
+(Optional)
+ Consumer group name, defaults to {namespace_name}-{sensor_name}
+ |
+
+
+
+rebalanceStrategy
+
+string
+
+ |
+
+(Optional)
+ Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.
+ |
+
+
+
+startOldest
+
+bool
+
+ |
+
+(Optional)
+ When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false
+ |
+
+
+
NATSBus
diff --git a/api/event-bus.md b/api/event-bus.md
index e4c1642e42..95d9c2d58a 100644
--- a/api/event-bus.md
+++ b/api/event-bus.md
@@ -74,6 +74,15 @@ NATSConfig
(Optional)
+
+
+kafka
+KafkaBus
+ |
+
+(Optional)
+ |
+
@@ -190,6 +199,18 @@ NATS eventbus
(Optional)
+
+
+kafka
+KafkaBus
+ |
+
+(Optional)
+
+Kafka eventbus
+
+ |
+
@@ -250,6 +271,18 @@ NATS eventbus
(Optional)
+
+
+kafka
+KafkaBus
+ |
+
+(Optional)
+
+Kafka eventbus
+
+ |
+
@@ -635,6 +668,160 @@ Secret for auth
+
+KafkaBus
+
+
+(Appears on:
+BusConfig,
+EventBusSpec)
+
+
+
+KafkaBus holds the KafkaBus EventBus information
+
+
+
+
+
+
+Field
+ |
+
+Description
+ |
+
+
+
+
+
+url string
+ |
+
+
+URL to kafka cluster, multiple URLs separated by comma
+
+ |
+
+
+
+topic string
+ |
+
+(Optional)
+
+Topic name, defaults to {namespace_name}-{eventbus_name}
+
+ |
+
+
+
+version string
+ |
+
+(Optional)
+
+Kafka version, sarama defaults to the oldest supported stable version
+
+ |
+
+
+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
+ |
+
+(Optional)
+
+TLS configuration for the kafka client.
+
+ |
+
+
+
+sasl
+github.com/argoproj/argo-events/pkg/apis/common.SASLConfig
+ |
+
+(Optional)
+
+SASL configuration for the kafka client
+
+ |
+
+
+
+consumerGroup
+ KafkaConsumerGroup
+
+ |
+
+(Optional)
+
+Consumer group for kafka client
+
+ |
+
+
+
+
+KafkaConsumerGroup
+
+
+(Appears on:
+KafkaBus)
+
+
+
+
+
+
+
+Field
+ |
+
+Description
+ |
+
+
+
+
+
+groupName string
+ |
+
+(Optional)
+
+Consumer group name, defaults to {namespace_name}-{sensor_name}
+
+ |
+
+
+
+rebalanceStrategy string
+ |
+
+(Optional)
+
+Rebalance strategy can be one of: sticky, roundrobin, range. Range is
+the default.
+
+ |
+
+
+
+startOldest bool
+ |
+
+(Optional)
+
+When starting up a new group do we want to start from the oldest event
+(true) or the newest event (false), defaults to false
+
+ |
+
+
+
NATSBus
diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json
index 105e92925e..4d0890071a 100644
--- a/api/jsonschema/schema.json
+++ b/api/jsonschema/schema.json
@@ -287,6 +287,9 @@
"jetstream": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig"
},
+ "kafka": {
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus"
+ },
"nats": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSConfig"
}
@@ -377,6 +380,10 @@
"jetstream": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamBus"
},
+ "kafka": {
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus",
+ "description": "Kafka eventbus"
+ },
"nats": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSBus",
"description": "NATS eventbus"
@@ -516,6 +523,53 @@
},
"type": "object"
},
+ "io.argoproj.eventbus.v1alpha1.KafkaBus": {
+ "description": "KafkaBus holds the KafkaBus EventBus information",
+ "properties": {
+ "consumerGroup": {
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup",
+ "description": "Consumer group for kafka client"
+ },
+ "sasl": {
+ "$ref": "#/definitions/io.argoproj.common.SASLConfig",
+ "description": "SASL configuration for the kafka client"
+ },
+ "tls": {
+ "$ref": "#/definitions/io.argoproj.common.TLSConfig",
+ "description": "TLS configuration for the kafka client."
+ },
+ "topic": {
+ "description": "Topic name, defaults to {namespace_name}-{eventbus_name}",
+ "type": "string"
+ },
+ "url": {
+ "description": "URL to kafka cluster, multiple URLs separated by comma",
+ "type": "string"
+ },
+ "version": {
+ "description": "Kafka version, sarama defaults to the oldest supported stable version",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup": {
+ "properties": {
+ "groupName": {
+ "description": "Consumer group name, defaults to {namespace_name}-{sensor_name}",
+ "type": "string"
+ },
+ "rebalanceStrategy": {
+ "description": "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.",
+ "type": "string"
+ },
+ "startOldest": {
+ "description": "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.eventbus.v1alpha1.NATSBus": {
"description": "NATSBus holds the NATS eventbus information",
"properties": {
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 38a415b200..b59c86596e 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -289,6 +289,9 @@
"jetstream": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig"
},
+ "kafka": {
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus"
+ },
"nats": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSConfig"
}
@@ -370,6 +373,10 @@
"jetstream": {
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamBus"
},
+ "kafka": {
+ "description": "Kafka eventbus",
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus"
+ },
"nats": {
"description": "NATS eventbus",
"$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSBus"
@@ -508,6 +515,53 @@
}
}
},
+ "io.argoproj.eventbus.v1alpha1.KafkaBus": {
+ "description": "KafkaBus holds the KafkaBus EventBus information",
+ "type": "object",
+ "properties": {
+ "consumerGroup": {
+ "description": "Consumer group for kafka client",
+ "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup"
+ },
+ "sasl": {
+ "description": "SASL configuration for the kafka client",
+ "$ref": "#/definitions/io.argoproj.common.SASLConfig"
+ },
+ "tls": {
+ "description": "TLS configuration for the kafka client.",
+ "$ref": "#/definitions/io.argoproj.common.TLSConfig"
+ },
+ "topic": {
+ "description": "Topic name, defaults to {namespace_name}-{eventbus_name}",
+ "type": "string"
+ },
+ "url": {
+ "description": "URL to kafka cluster, multiple URLs separated by comma",
+ "type": "string"
+ },
+ "version": {
+ "description": "Kafka version, sarama defaults to the oldest supported stable version",
+ "type": "string"
+ }
+ }
+ },
+ "io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup": {
+ "type": "object",
+ "properties": {
+ "groupName": {
+ "description": "Consumer group name, defaults to {namespace_name}-{sensor_name}",
+ "type": "string"
+ },
+ "rebalanceStrategy": {
+ "description": "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.",
+ "type": "string"
+ },
+ "startOldest": {
+ "description": "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false",
+ "type": "boolean"
+ }
+ }
+ },
"io.argoproj.eventbus.v1alpha1.NATSBus": {
"description": "NATSBus holds the NATS eventbus information",
"type": "object",
diff --git a/common/leaderelection/leaderelection.go b/common/leaderelection/leaderelection.go
index dc7a500bde..f3ed2218ea 100644
--- a/common/leaderelection/leaderelection.go
+++ b/common/leaderelection/leaderelection.go
@@ -40,7 +40,7 @@ type LeaderCallbacks struct {
func NewElector(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, clusterName string, clusterSize int, namespace string, leasename string, hostname string) (Elector, error) {
switch {
- case strings.ToLower(os.Getenv(common.EnvVarLeaderElection)) == "k8s":
+ case eventBusConfig.Kafka != nil || strings.ToLower(os.Getenv(common.EnvVarLeaderElection)) == "k8s":
return newKubernetesElector(namespace, leasename, hostname)
case eventBusConfig.NATS != nil:
return newEventBusElector(ctx, eventBusConfig.NATS.Auth, clusterName, clusterSize, eventBusConfig.NATS.URL)
diff --git a/common/util.go b/common/util.go
index 02a5b51b61..12a5525c12 100644
--- a/common/util.go
+++ b/common/util.go
@@ -277,13 +277,18 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) {
// VolumesFromSecretsOrConfigMaps builds volumes and volumeMounts spec based on
// the obj and its children's secretKeyselector or configMapKeySelector
-func VolumesFromSecretsOrConfigMaps(obj interface{}, t reflect.Type) ([]v1.Volume, []v1.VolumeMount) {
+func VolumesFromSecretsOrConfigMaps(t reflect.Type, objs ...interface{}) ([]v1.Volume, []v1.VolumeMount) {
resultVolumes := []v1.Volume{}
resultMounts := []v1.VolumeMount{}
- values := findTypeValues(obj, t)
+ values := []interface{}{}
+
+ for _, obj := range objs {
+ values = append(values, findTypeValues(obj, t)...)
+ }
if len(values) == 0 {
return resultVolumes, resultMounts
}
+
switch t {
case SecretKeySelectorType:
for _, v := range values {
diff --git a/common/util_test.go b/common/util_test.go
index e9cb9b8a13..35ce0d1bfa 100644
--- a/common/util_test.go
+++ b/common/util_test.go
@@ -162,13 +162,13 @@ func TestFindTypeValues(t *testing.T) {
func TestVolumesFromSecretsOrConfigMaps(t *testing.T) {
t.Run("test secret volumes", func(t *testing.T) {
- vols, mounts := VolumesFromSecretsOrConfigMaps(&testXObj, SecretKeySelectorType)
+ vols, mounts := VolumesFromSecretsOrConfigMaps(SecretKeySelectorType, &testXObj)
assert.Equal(t, len(vols), 6)
assert.Equal(t, len(mounts), 6)
})
t.Run("test configmap volumes", func(t *testing.T) {
- vols, mounts := VolumesFromSecretsOrConfigMaps(&testXObj, ConfigMapKeySelectorType)
+ vols, mounts := VolumesFromSecretsOrConfigMaps(ConfigMapKeySelectorType, &testXObj)
assert.Equal(t, len(vols), 6)
assert.Equal(t, len(mounts), 6)
})
diff --git a/controllers/eventbus/installer/installer.go b/controllers/eventbus/installer/installer.go
index 7c211ee04a..ab686ec6ca 100644
--- a/controllers/eventbus/installer/installer.go
+++ b/controllers/eventbus/installer/installer.go
@@ -49,6 +49,8 @@ func getInstaller(eventBus *v1alpha1.EventBus, client client.Client, kubeClient
}
} else if js := eventBus.Spec.JetStream; js != nil {
return NewJetStreamInstaller(client, eventBus, config, getLabels(eventBus), kubeClient, logger), nil
+ } else if kafka := eventBus.Spec.Kafka; kafka != nil {
+ return NewExoticKafkaInstaller(eventBus, logger), nil
}
return nil, fmt.Errorf("invalid eventbus spec")
}
diff --git a/controllers/eventbus/installer/kafka.go b/controllers/eventbus/installer/kafka.go
new file mode 100644
index 0000000000..5b0feacfd4
--- /dev/null
+++ b/controllers/eventbus/installer/kafka.go
@@ -0,0 +1,47 @@
+package installer
+
+import (
+ "context"
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+)
+
+// exoticKafkaInstaller is an installation implementation of exotic kafka config.
+type exoticKafkaInstaller struct {
+ eventBus *v1alpha1.EventBus
+
+ logger *zap.SugaredLogger
+}
+
+// NewExoticKafkaInstaller returns a new exoticKafkaInstaller
+func NewExoticKafkaInstaller(eventBus *v1alpha1.EventBus, logger *zap.SugaredLogger) Installer {
+ return &exoticKafkaInstaller{
+ eventBus: eventBus,
+ logger: logger.Named("exotic-kafka"),
+ }
+}
+
+func (i *exoticKafkaInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) {
+ kafkaObj := i.eventBus.Spec.Kafka
+ if kafkaObj == nil {
+ return nil, fmt.Errorf("invalid request")
+ }
+ if kafkaObj.Topic == "" {
+ kafkaObj.Topic = fmt.Sprintf("%s-%s", i.eventBus.Namespace, i.eventBus.Name)
+ }
+
+ i.eventBus.Status.MarkDeployed("Skipped", "Skip deployment because of using exotic config.")
+ i.logger.Info("use exotic config")
+ busConfig := &v1alpha1.BusConfig{
+ Kafka: kafkaObj,
+ }
+ return busConfig, nil
+}
+
+func (i *exoticKafkaInstaller) Uninstall(ctx context.Context) error {
+ i.logger.Info("nothing to uninstall")
+ return nil
+}
diff --git a/controllers/eventbus/installer/kafka_test.go b/controllers/eventbus/installer/kafka_test.go
new file mode 100644
index 0000000000..e5a045a13a
--- /dev/null
+++ b/controllers/eventbus/installer/kafka_test.go
@@ -0,0 +1,53 @@
+package installer
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/argoproj/argo-events/common/logging"
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+)
+
+const (
+ testKafkaName = "test-kafka"
+ testKafkaURL = "kafka:9092"
+)
+
+var (
+ testKafkaExoticBus = &v1alpha1.EventBus{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: v1alpha1.SchemeGroupVersion.String(),
+ Kind: "EventBus",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testKafkaName,
+ },
+ Spec: v1alpha1.EventBusSpec{
+ Kafka: &v1alpha1.KafkaBus{
+ URL: testKafkaURL,
+ },
+ },
+ }
+)
+
+func TestInstallationKafkaExotic(t *testing.T) {
+ t.Run("installation with exotic kafka config", func(t *testing.T) {
+ installer := NewExoticKafkaInstaller(testKafkaExoticBus, logging.NewArgoEventsLogger())
+ conf, err := installer.Install(context.TODO())
+ assert.NoError(t, err)
+ assert.NotNil(t, conf.Kafka)
+ assert.Equal(t, conf.Kafka.URL, testKafkaURL)
+ })
+}
+
+func TestUninstallationKafkaExotic(t *testing.T) {
+ t.Run("uninstallation with exotic kafka config", func(t *testing.T) {
+ installer := NewExoticKafkaInstaller(testKafkaExoticBus, logging.NewArgoEventsLogger())
+ err := installer.Uninstall(context.TODO())
+ assert.NoError(t, err)
+ })
+}
diff --git a/controllers/eventbus/validate.go b/controllers/eventbus/validate.go
index 9af22c4520..889b41b4a5 100644
--- a/controllers/eventbus/validate.go
+++ b/controllers/eventbus/validate.go
@@ -8,8 +8,8 @@ import (
// ValidateEventBus accepts an EventBus and performs validation against it
func ValidateEventBus(eb *v1alpha1.EventBus) error {
- if eb.Spec.NATS == nil && eb.Spec.JetStream == nil {
- return fmt.Errorf("invalid spec: either \"nats\" or \"jetstream\" needs to be specified")
+ if eb.Spec.NATS == nil && eb.Spec.JetStream == nil && eb.Spec.Kafka == nil {
+ return fmt.Errorf("invalid spec: either \"nats\", \"jetstream\", or \"kafka\" needs to be specified")
}
if x := eb.Spec.NATS; x != nil {
if x.Native != nil && x.Exotic != nil {
@@ -36,5 +36,10 @@ func ValidateEventBus(eb *v1alpha1.EventBus) error {
return fmt.Errorf("invalid spec: a jetstream eventbus requires at least 3 replicas")
}
}
+ if x := eb.Spec.Kafka; x != nil {
+ if x.URL == "" {
+ return fmt.Errorf("\"spec.kafka.url\" is missing")
+ }
+ }
return nil
}
diff --git a/controllers/eventbus/validate_test.go b/controllers/eventbus/validate_test.go
index e6ce1bd763..8c4ce69667 100644
--- a/controllers/eventbus/validate_test.go
+++ b/controllers/eventbus/validate_test.go
@@ -38,6 +38,18 @@ var (
},
},
}
+
+ testKafkaEventBus = &v1alpha1.EventBus{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "test-ns",
+ Name: common.DefaultEventBusName,
+ },
+ Spec: v1alpha1.EventBusSpec{
+ Kafka: &v1alpha1.KafkaBus{
+ URL: "127.0.0.1:9092",
+ },
+ },
+ }
)
func TestValidate(t *testing.T) {
@@ -51,6 +63,11 @@ func TestValidate(t *testing.T) {
assert.NoError(t, err)
})
+ t.Run("test good kafka eventbus", func(t *testing.T) {
+ err := ValidateEventBus(testKafkaEventBus)
+ assert.NoError(t, err)
+ })
+
t.Run("test bad eventbus", func(t *testing.T) {
eb := testNatsEventBus.DeepCopy()
eb.Spec.NATS = nil
@@ -109,4 +126,12 @@ func TestValidate(t *testing.T) {
err = ValidateEventBus(eb)
assert.NoError(t, err)
})
+
+ t.Run("test kafka eventbus no URL", func(t *testing.T) {
+ eb := testKafkaEventBus.DeepCopy()
+ eb.Spec.Kafka.URL = ""
+ err := ValidateEventBus(eb)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "\"spec.kafka.url\" is missing")
+ })
}
diff --git a/controllers/eventsource/controller_test.go b/controllers/eventsource/controller_test.go
index 880a8c2215..2b35bff9af 100644
--- a/controllers/eventsource/controller_test.go
+++ b/controllers/eventsource/controller_test.go
@@ -72,6 +72,52 @@ var (
},
},
}
+
+ fakeEventBusJetstream = &eventbusv1alpha1.EventBus{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(),
+ Kind: "EventBus",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: common.DefaultEventBusName,
+ },
+ Spec: eventbusv1alpha1.EventBusSpec{
+ JetStream: &eventbusv1alpha1.JetStreamBus{
+ Version: "x.x.x",
+ },
+ },
+ Status: eventbusv1alpha1.EventBusStatus{
+ Config: eventbusv1alpha1.BusConfig{
+ JetStream: &eventbusv1alpha1.JetStreamConfig{
+ URL: "nats://xxxx",
+ },
+ },
+ },
+ }
+
+ fakeEventBusKafka = &eventbusv1alpha1.EventBus{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(),
+ Kind: "EventBus",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: common.DefaultEventBusName,
+ },
+ Spec: eventbusv1alpha1.EventBusSpec{
+ Kafka: &eventbusv1alpha1.KafkaBus{
+ URL: "localhost:9092",
+ },
+ },
+ Status: eventbusv1alpha1.EventBusStatus{
+ Config: eventbusv1alpha1.BusConfig{
+ Kafka: &eventbusv1alpha1.KafkaBus{
+ URL: "localhost:9092",
+ },
+ },
+ },
+ }
)
func fakeEmptyEventSource() *v1alpha1.EventSource {
diff --git a/controllers/eventsource/resource.go b/controllers/eventsource/resource.go
index 14d5feddc5..5159b39453 100644
--- a/controllers/eventsource/resource.go
+++ b/controllers/eventsource/resource.go
@@ -177,11 +177,15 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
if err != nil {
return nil, fmt.Errorf("failed marshal eventsource spec")
}
- encodedEventSourceSpec := base64.StdEncoding.EncodeToString(eventSourceBytes)
- envVars := []corev1.EnvVar{
+ busConfigBytes, err := json.Marshal(eventBus.Status.Config)
+ if err != nil {
+ return nil, fmt.Errorf("failed marshal event bus config: %v", err)
+ }
+
+ env := []corev1.EnvVar{
{
Name: common.EnvVarEventSourceObject,
- Value: encodedEventSourceSpec,
+ Value: base64.StdEncoding.EncodeToString(eventSourceBytes),
},
{
Name: common.EnvVarEventBusSubject,
@@ -195,34 +199,42 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
Name: common.EnvVarLeaderElection,
Value: args.EventSource.Annotations[common.AnnotationLeaderElection],
},
+ {
+ Name: common.EnvVarEventBusConfig,
+ Value: base64.StdEncoding.EncodeToString(busConfigBytes),
+ },
}
- busConfigBytes, err := json.Marshal(eventBus.Status.Config)
- if err != nil {
- return nil, fmt.Errorf("failed marshal event bus config: %v", err)
+ volumes := []corev1.Volume{
+ {
+ Name: "tmp",
+ VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
+ },
}
- encodedBusConfig := base64.StdEncoding.EncodeToString(busConfigBytes)
- envVars = append(envVars, corev1.EnvVar{Name: common.EnvVarEventBusConfig, Value: encodedBusConfig})
+
+ volumeMounts := []corev1.VolumeMount{
+ {
+ Name: "tmp",
+ MountPath: "/tmp",
+ },
+ }
+
+ var secretObjs []interface{}
var accessSecret *corev1.SecretKeySelector
switch {
case eventBus.Status.Config.NATS != nil:
- natsConf := eventBus.Status.Config.NATS
- accessSecret = natsConf.AccessSecret
+ accessSecret = eventBus.Status.Config.NATS.AccessSecret
+ secretObjs = []interface{}{eventSourceCopy}
case eventBus.Status.Config.JetStream != nil:
- jsConf := eventBus.Status.Config.JetStream
- accessSecret = jsConf.AccessSecret
+ accessSecret = eventBus.Status.Config.JetStream.AccessSecret
+ secretObjs = []interface{}{eventSourceCopy}
+ case eventBus.Status.Config.Kafka != nil:
+ accessSecret = nil
+ secretObjs = []interface{}{eventSourceCopy, eventBus} // kafka requires secrets for sasl and tls
default:
return nil, fmt.Errorf("unsupported event bus")
}
- volumes := deploymentSpec.Template.Spec.Volumes
- volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts
- emptyDirVolName := "tmp"
- volumes = append(volumes, corev1.Volume{
- Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
- })
- volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"})
-
if accessSecret != nil {
// Mount the secret as volume instead of using envFrom to gain the ability
// for the sensor deployment to auto reload when the secret changes
@@ -240,42 +252,25 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
},
},
})
- volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath})
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "auth-volume",
+ MountPath: common.EventBusAuthFileMountPath,
+ })
}
- deploymentSpec.Template.Spec.Volumes = volumes
- deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
- envs := deploymentSpec.Template.Spec.Containers[0].Env
- envs = append(envs, envVars...)
- deploymentSpec.Template.Spec.Containers[0].Env = envs
+ // secrets
+ volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(common.SecretKeySelectorType, secretObjs...)
+ volumes = append(volumes, volSecrets...)
+ volumeMounts = append(volumeMounts, volSecretMounts...)
- vols := []corev1.Volume{}
- volMounts := []corev1.VolumeMount{}
- oldVols := deploymentSpec.Template.Spec.Volumes
- oldVolMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts
- if len(oldVols) > 0 {
- vols = append(vols, oldVols...)
- }
- if len(oldVolMounts) > 0 {
- volMounts = append(volMounts, oldVolMounts...)
- }
- volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(eventSourceCopy, common.SecretKeySelectorType)
- if len(volSecrets) > 0 {
- vols = append(vols, volSecrets...)
- }
- if len(volSecretMounts) > 0 {
- volMounts = append(volMounts, volSecretMounts...)
- }
- volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(eventSourceCopy, common.ConfigMapKeySelectorType)
- if len(volConfigMaps) > 0 {
- vols = append(vols, volConfigMaps...)
- }
- if len(volCofigMapMounts) > 0 {
- volMounts = append(volMounts, volCofigMapMounts...)
- }
+ // config maps
+ volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(common.ConfigMapKeySelectorType, eventSourceCopy)
+ volumeMounts = append(volumeMounts, volCofigMapMounts...)
+ volumes = append(volumes, volConfigMaps...)
- deploymentSpec.Template.Spec.Volumes = vols
- deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volMounts
+ deploymentSpec.Template.Spec.Containers[0].Env = append(deploymentSpec.Template.Spec.Containers[0].Env, env...)
+ deploymentSpec.Template.Spec.Containers[0].VolumeMounts = append(deploymentSpec.Template.Spec.Containers[0].VolumeMounts, volumeMounts...)
+ deploymentSpec.Template.Spec.Volumes = append(deploymentSpec.Template.Spec.Volumes, volumes...)
deployment := &appv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
@@ -288,6 +283,7 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
if err := controllerscommon.SetObjectMeta(args.EventSource, deployment, v1alpha1.SchemaGroupVersionKind); err != nil {
return nil, err
}
+
return deployment, nil
}
diff --git a/controllers/eventsource/resource_test.go b/controllers/eventsource/resource_test.go
index dc2d2b5269..e5609d060a 100644
--- a/controllers/eventsource/resource_test.go
+++ b/controllers/eventsource/resource_test.go
@@ -12,6 +12,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/argoproj/argo-events/common/logging"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+ eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
)
@@ -69,6 +71,55 @@ func Test_BuildDeployment(t *testing.T) {
assert.True(t, secretRefs > 0)
assert.Equal(t, deployment.Spec.Template.Spec.PriorityClassName, "test-class")
})
+
+ t.Run("test kafka eventbus secrets attached", func(t *testing.T) {
+ args := &AdaptorArgs{
+ Image: testImage,
+ EventSource: testEventSource,
+ Labels: testLabels,
+ }
+
+ // add secrets to kafka eventbus
+ testBus := fakeEventBusKafka.DeepCopy()
+ testBus.Spec.Kafka.TLS = &apicommon.TLSConfig{
+ CACertSecret: &corev1.SecretKeySelector{Key: "cert", LocalObjectReference: corev1.LocalObjectReference{Name: "tls-secret"}},
+ }
+ testBus.Spec.Kafka.SASL = &apicommon.SASLConfig{
+ Mechanism: "SCRAM-SHA-512",
+ UserSecret: &corev1.SecretKeySelector{Key: "username", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}},
+ PasswordSecret: &corev1.SecretKeySelector{Key: "password", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}},
+ }
+
+ deployment, err := buildDeployment(args, testBus)
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+
+ hasSASLSecretVolume := false
+ hasSASLSecretVolumeMount := false
+ hasTLSSecretVolume := false
+ hasTLSSecretVolumeMount := false
+ for _, volume := range deployment.Spec.Template.Spec.Volumes {
+ if volume.Name == "secret-sasl-secret" {
+ hasSASLSecretVolume = true
+ }
+ if volume.Name == "secret-tls-secret" {
+ hasTLSSecretVolume = true
+ }
+ }
+ for _, volumeMount := range deployment.Spec.Template.Spec.Containers[0].VolumeMounts {
+ if volumeMount.Name == "secret-sasl-secret" {
+ hasSASLSecretVolumeMount = true
+ }
+ if volumeMount.Name == "secret-tls-secret" {
+ hasTLSSecretVolumeMount = true
+ }
+ }
+
+ assert.True(t, hasSASLSecretVolume)
+ assert.True(t, hasSASLSecretVolumeMount)
+ assert.True(t, hasTLSSecretVolume)
+ assert.True(t, hasTLSSecretVolumeMount)
+ })
}
func TestResourceReconcile(t *testing.T) {
@@ -86,35 +137,38 @@ func TestResourceReconcile(t *testing.T) {
assert.False(t, testEventSource.Status.IsReady())
})
- t.Run("test resource reconcile with eventbus", func(t *testing.T) {
- ctx := context.TODO()
- cl := fake.NewClientBuilder().Build()
- testBus := fakeEventBus.DeepCopy()
- testBus.Status.MarkDeployed("test", "test")
- testBus.Status.MarkConfigured()
- err := cl.Create(ctx, testBus)
- assert.Nil(t, err)
- args := &AdaptorArgs{
- Image: testImage,
- EventSource: testEventSource,
- Labels: testLabels,
- }
- err = Reconcile(cl, args, logging.NewArgoEventsLogger())
- assert.Nil(t, err)
- assert.True(t, testEventSource.Status.IsReady())
+ for _, eb := range []*eventbusv1alpha1.EventBus{fakeEventBus, fakeEventBusJetstream, fakeEventBusKafka} {
+ testBus := eb.DeepCopy()
- deployList := &appv1.DeploymentList{}
- err = cl.List(ctx, deployList, &client.ListOptions{
- Namespace: testNamespace,
- })
- assert.NoError(t, err)
- assert.Equal(t, 1, len(deployList.Items))
+ t.Run("test resource reconcile with eventbus", func(t *testing.T) {
+ ctx := context.TODO()
+ cl := fake.NewClientBuilder().Build()
+ testBus.Status.MarkDeployed("test", "test")
+ testBus.Status.MarkConfigured()
+ err := cl.Create(ctx, testBus)
+ assert.Nil(t, err)
+ args := &AdaptorArgs{
+ Image: testImage,
+ EventSource: testEventSource,
+ Labels: testLabels,
+ }
+ err = Reconcile(cl, args, logging.NewArgoEventsLogger())
+ assert.Nil(t, err)
+ assert.True(t, testEventSource.Status.IsReady())
+
+ deployList := &appv1.DeploymentList{}
+ err = cl.List(ctx, deployList, &client.ListOptions{
+ Namespace: testNamespace,
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(deployList.Items))
- svcList := &corev1.ServiceList{}
- err = cl.List(ctx, svcList, &client.ListOptions{
- Namespace: testNamespace,
+ svcList := &corev1.ServiceList{}
+ err = cl.List(ctx, svcList, &client.ListOptions{
+ Namespace: testNamespace,
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(svcList.Items))
})
- assert.NoError(t, err)
- assert.Equal(t, 0, len(svcList.Items))
- })
+ }
}
diff --git a/controllers/sensor/resource.go b/controllers/sensor/resource.go
index ec4285ba1a..cc573bc36d 100644
--- a/controllers/sensor/resource.go
+++ b/controllers/sensor/resource.go
@@ -150,8 +150,12 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
if err != nil {
return nil, fmt.Errorf("failed marshal sensor spec")
}
- encodedSensorSpec := base64.StdEncoding.EncodeToString(sensorBytes)
- envVars := []corev1.EnvVar{
+ busConfigBytes, err := json.Marshal(eventBus.Status.Config)
+ if err != nil {
+ return nil, fmt.Errorf("failed marshal event bus config: %v", err)
+ }
+
+ env := []corev1.EnvVar{
{
Name: common.EnvVarEventBusSubject,
Value: fmt.Sprintf("eventbus-%s", args.Sensor.Namespace),
@@ -164,41 +168,48 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
Name: common.EnvVarLeaderElection,
Value: args.Sensor.Annotations[common.AnnotationLeaderElection],
},
+ {
+ Name: common.EnvVarEventBusConfig,
+ Value: base64.StdEncoding.EncodeToString(busConfigBytes),
+ },
}
if !args.Sensor.Spec.LiveReload {
- envVars = append(envVars, corev1.EnvVar{
+ env = append(env, corev1.EnvVar{
Name: common.EnvVarSensorObject,
- Value: encodedSensorSpec,
+ Value: base64.StdEncoding.EncodeToString(sensorBytes),
})
}
- busConfigBytes, err := json.Marshal(eventBus.Status.Config)
- if err != nil {
- return nil, fmt.Errorf("failed marshal event bus config: %v", err)
+ volumes := []corev1.Volume{
+ {
+ Name: "tmp",
+ VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
+ },
}
- encodedBusConfig := base64.StdEncoding.EncodeToString(busConfigBytes)
- envVars = append(envVars, corev1.EnvVar{Name: common.EnvVarEventBusConfig, Value: encodedBusConfig})
+ volumeMounts := []corev1.VolumeMount{
+ {
+ Name: "tmp",
+ MountPath: "/tmp",
+ },
+ }
+
+ var secretObjs []interface{}
var accessSecret *corev1.SecretKeySelector
switch {
case eventBus.Status.Config.NATS != nil:
- natsConf := eventBus.Status.Config.NATS
- accessSecret = natsConf.AccessSecret
+ accessSecret = eventBus.Status.Config.NATS.AccessSecret
+ secretObjs = []interface{}{sensorCopy}
case eventBus.Status.Config.JetStream != nil:
- jsConf := eventBus.Status.Config.JetStream
- accessSecret = jsConf.AccessSecret
+ accessSecret = eventBus.Status.Config.JetStream.AccessSecret
+ secretObjs = []interface{}{sensorCopy}
+ case eventBus.Status.Config.Kafka != nil:
+ accessSecret = nil
+ secretObjs = []interface{}{sensorCopy, eventBus} // kafka requires secrets for sasl and tls
default:
return nil, fmt.Errorf("unsupported event bus")
}
- volumes := deploymentSpec.Template.Spec.Volumes
- volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts
- emptyDirVolName := "tmp"
- volumes = append(volumes, corev1.Volume{
- Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
- })
- volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"})
-
if accessSecret != nil {
// Mount the secret as volume instead of using envFrom to gain the ability
// for the sensor deployment to auto reload when the secret changes
@@ -216,7 +227,10 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
},
},
})
- volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath})
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "auth-volume",
+ MountPath: common.EventBusAuthFileMountPath,
+ })
}
if args.Sensor.Spec.LiveReload {
@@ -230,40 +244,19 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "sensor-config-volume", MountPath: "/sensor-definition"})
}
- deploymentSpec.Template.Spec.Volumes = volumes
- deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
+ // secrets
+ volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(common.SecretKeySelectorType, secretObjs...)
+ volumes = append(volumes, volSecrets...)
+ volumeMounts = append(volumeMounts, volSecretMounts...)
- envs := deploymentSpec.Template.Spec.Containers[0].Env
- envs = append(envs, envVars...)
- deploymentSpec.Template.Spec.Containers[0].Env = envs
+ // config maps
+ volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(common.ConfigMapKeySelectorType, sensorCopy)
+ volumeMounts = append(volumeMounts, volCofigMapMounts...)
+ volumes = append(volumes, volConfigMaps...)
- vols := []corev1.Volume{}
- volMounts := []corev1.VolumeMount{}
- oldVols := deploymentSpec.Template.Spec.Volumes
- oldVolMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts
- if len(oldVols) > 0 {
- vols = append(vols, oldVols...)
- }
- if len(oldVolMounts) > 0 {
- volMounts = append(volMounts, oldVolMounts...)
- }
- volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(sensorCopy, common.SecretKeySelectorType)
- if len(volSecrets) > 0 {
- vols = append(vols, volSecrets...)
- }
- if len(volSecretMounts) > 0 {
- volMounts = append(volMounts, volSecretMounts...)
- }
- volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(sensorCopy, common.ConfigMapKeySelectorType)
- if len(volConfigMaps) > 0 {
- vols = append(vols, volConfigMaps...)
- }
- if len(volCofigMapMounts) > 0 {
- volMounts = append(volMounts, volCofigMapMounts...)
- }
-
- deploymentSpec.Template.Spec.Volumes = vols
- deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volMounts
+ deploymentSpec.Template.Spec.Containers[0].Env = append(deploymentSpec.Template.Spec.Containers[0].Env, env...)
+ deploymentSpec.Template.Spec.Containers[0].VolumeMounts = append(deploymentSpec.Template.Spec.Containers[0].VolumeMounts, volumeMounts...)
+ deploymentSpec.Template.Spec.Volumes = append(deploymentSpec.Template.Spec.Volumes, volumes...)
deployment := &appv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
@@ -276,6 +269,7 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a
if err := controllerscommon.SetObjectMeta(args.Sensor, deployment, v1alpha1.SchemaGroupVersionKind); err != nil {
return nil, err
}
+
return deployment, nil
}
func updateOrCreateConfigMap(ctx context.Context, client client.Client, name, namespace string, data map[string]string) error {
diff --git a/controllers/sensor/resource_test.go b/controllers/sensor/resource_test.go
index a79f1bc576..3fe164a35f 100644
--- a/controllers/sensor/resource_test.go
+++ b/controllers/sensor/resource_test.go
@@ -30,6 +30,7 @@ import (
"github.com/argoproj/argo-events/common"
"github.com/argoproj/argo-events/common/logging"
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
)
@@ -124,6 +125,52 @@ var (
},
},
}
+
+ fakeEventBusJetstream = &eventbusv1alpha1.EventBus{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(),
+ Kind: "EventBus",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: common.DefaultEventBusName,
+ },
+ Spec: eventbusv1alpha1.EventBusSpec{
+ JetStream: &eventbusv1alpha1.JetStreamBus{
+ Version: "x.x.x",
+ },
+ },
+ Status: eventbusv1alpha1.EventBusStatus{
+ Config: eventbusv1alpha1.BusConfig{
+ JetStream: &eventbusv1alpha1.JetStreamConfig{
+ URL: "nats://xxxx",
+ },
+ },
+ },
+ }
+
+ fakeEventBusKafka = &eventbusv1alpha1.EventBus{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(),
+ Kind: "EventBus",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: common.DefaultEventBusName,
+ },
+ Spec: eventbusv1alpha1.EventBusSpec{
+ Kafka: &eventbusv1alpha1.KafkaBus{
+ URL: "localhost:9092",
+ },
+ },
+ Status: eventbusv1alpha1.EventBusStatus{
+ Config: eventbusv1alpha1.BusConfig{
+ Kafka: &eventbusv1alpha1.KafkaBus{
+ URL: "localhost:9092",
+ },
+ },
+ },
+ }
)
func Test_BuildDeployment(t *testing.T) {
@@ -234,6 +281,54 @@ func Test_BuildDeployment(t *testing.T) {
assert.True(t, hasConfigMapVolumeMount)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 4, "Verify unexpected volumes aren't mounted")
})
+ t.Run("test kafka eventbus secrets attached", func(t *testing.T) {
+ args := &AdaptorArgs{
+ Image: testImage,
+ Sensor: sensorObj,
+ Labels: testLabels,
+ }
+
+ // add secrets to kafka eventbus
+ testBus := fakeEventBusKafka.DeepCopy()
+ testBus.Spec.Kafka.TLS = &apicommon.TLSConfig{
+ CACertSecret: &corev1.SecretKeySelector{Key: "cert", LocalObjectReference: corev1.LocalObjectReference{Name: "tls-secret"}},
+ }
+ testBus.Spec.Kafka.SASL = &apicommon.SASLConfig{
+ Mechanism: "SCRAM-SHA-512",
+ UserSecret: &corev1.SecretKeySelector{Key: "username", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}},
+ PasswordSecret: &corev1.SecretKeySelector{Key: "password", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}},
+ }
+
+ deployment, err := buildDeployment(args, testBus)
+ assert.Nil(t, err)
+ assert.NotNil(t, deployment)
+
+ hasSASLSecretVolume := false
+ hasSASLSecretVolumeMount := false
+ hasTLSSecretVolume := false
+ hasTLSSecretVolumeMount := false
+ for _, volume := range deployment.Spec.Template.Spec.Volumes {
+ if volume.Name == "secret-sasl-secret" {
+ hasSASLSecretVolume = true
+ }
+ if volume.Name == "secret-tls-secret" {
+ hasTLSSecretVolume = true
+ }
+ }
+ for _, volumeMount := range deployment.Spec.Template.Spec.Containers[0].VolumeMounts {
+ if volumeMount.Name == "secret-sasl-secret" {
+ hasSASLSecretVolumeMount = true
+ }
+ if volumeMount.Name == "secret-tls-secret" {
+ hasTLSSecretVolumeMount = true
+ }
+ }
+
+ assert.True(t, hasSASLSecretVolume)
+ assert.True(t, hasSASLSecretVolumeMount)
+ assert.True(t, hasTLSSecretVolume)
+ assert.True(t, hasTLSSecretVolumeMount)
+ })
}
func TestResourceReconcile(t *testing.T) {
@@ -249,37 +344,42 @@ func TestResourceReconcile(t *testing.T) {
assert.False(t, sensorObj.Status.IsReady())
})
- t.Run("test resource reconcile with eventbus", func(t *testing.T) {
- ctx := context.TODO()
- cl := fake.NewClientBuilder().Build()
- testBus := fakeEventBus.DeepCopy()
- testBus.Status.MarkDeployed("test", "test")
- testBus.Status.MarkConfigured()
- err := cl.Create(ctx, testBus)
- assert.Nil(t, err)
- args := &AdaptorArgs{
- Image: testImage,
- Sensor: sensorObj,
- Labels: testLabels,
- }
- err = Reconcile(cl, testBus, args, logging.NewArgoEventsLogger())
- assert.Nil(t, err)
- assert.True(t, sensorObj.Status.IsReady())
+ for _, eb := range []*eventbusv1alpha1.EventBus{fakeEventBus, fakeEventBusJetstream, fakeEventBusKafka} {
+ testBus := eb.DeepCopy()
- deployList := &appv1.DeploymentList{}
- err = cl.List(ctx, deployList, &client.ListOptions{
- Namespace: testNamespace,
- })
- assert.NoError(t, err)
- assert.Equal(t, 1, len(deployList.Items))
+ t.Run("test resource reconcile with eventbus", func(t *testing.T) {
+ ctx := context.TODO()
+ cl := fake.NewClientBuilder().Build()
+ testBus.Status.MarkDeployed("test", "test")
+ testBus.Status.MarkConfigured()
+ err := cl.Create(ctx, testBus)
+ assert.Nil(t, err)
+ args := &AdaptorArgs{
+ Image: testImage,
+ Sensor: sensorObj,
+ Labels: testLabels,
+ }
+ err = Reconcile(cl, testBus, args, logging.NewArgoEventsLogger())
+ assert.Nil(t, err)
+ assert.True(t, sensorObj.Status.IsReady())
- svcList := &corev1.ServiceList{}
- err = cl.List(ctx, svcList, &client.ListOptions{
- Namespace: testNamespace,
+ deployList := &appv1.DeploymentList{}
+ err = cl.List(ctx, deployList, &client.ListOptions{
+ Namespace: testNamespace,
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(deployList.Items))
+
+ svcList := &corev1.ServiceList{}
+ err = cl.List(ctx, svcList, &client.ListOptions{
+ Namespace: testNamespace,
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(svcList.Items))
+ assert.NoError(t, err)
+ assert.Equal(t, 0, len(svcList.Items))
})
- assert.NoError(t, err)
- assert.Equal(t, 0, len(svcList.Items))
- })
+ }
t.Run("test resource reconcile with live reload (create/update)", func(t *testing.T) {
ctx := context.TODO()
cl := fake.NewClientBuilder().Build()
diff --git a/docs/concepts/eventbus.md b/docs/concepts/eventbus.md
index 251fb48470..9478f8068f 100644
--- a/docs/concepts/eventbus.md
+++ b/docs/concepts/eventbus.md
@@ -1,7 +1,7 @@
-# Eventbus
+# EventBus
-The eventbus acts as the transport layer of Argo-Events by connecting the event-sources and sensors.
+The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors.
-Event-Sources publish the events while the sensors subscribe to the events to execute triggers.
+EventSources publish the events while the Sensors subscribe to the events to execute triggers.
-There are two implementations of the eventbus: [NATS streaming](https://docs.nats.io/legacy/stan/intro#:~:text=NATS%20Streaming%20is%20a%20data,with%20the%20core%20NATS%20platform.) and now [NATS Jetstream](https://docs.nats.io/nats-concepts/jetstream) (which will replace the former, which will be deprecated).
+There are three implementations of the EventBus: [NATS](https://docs.nats.io/legacy/stan/intro#:~:text=NATS%20Streaming%20is%20a%20data,with%20the%20core%20NATS%20platform.) (deprecated), [Jetstream](https://docs.nats.io/nats-concepts/jetstream), and [Kafka](https://kafka.apache.org).
diff --git a/docs/eventbus/eventbus.md b/docs/eventbus/eventbus.md
index 7794a42ce4..50b7e41189 100644
--- a/docs/eventbus/eventbus.md
+++ b/docs/eventbus/eventbus.md
@@ -7,7 +7,9 @@
EventBus is a Kubernetes
[Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
which is used for event transmission from EventSources to Sensors. Currently,
-EventBus is backed by [NATS](https://docs.nats.io/), including both their NATS Streaming service and their newer Jetstream service. In the future, this can be expanded to support other technologies as well.
+EventBus is backed by [NATS](https://docs.nats.io/), including both their NATS
+Streaming service and their newer Jetstream service, as well as Kafka. In the
+future, this can be expanded to support other technologies as well.
EventBus is namespaced; an EventBus object is required in a namespace to make
EventSource and Sensor work.
diff --git a/docs/eventbus/kafka.md b/docs/eventbus/kafka.md
new file mode 100644
index 0000000000..92edf046bf
--- /dev/null
+++ b/docs/eventbus/kafka.md
@@ -0,0 +1,52 @@
+Kafka is a widely used event streaming platform. Unlike NATS and Jetstream, a
+Kafka cluster managed independently of Argo Events is required to use a Kafka
+EventBus.
+
+## Example
+```yaml
+kind: EventBus
+metadata:
+ name: default
+spec:
+ kafka:
+ url: kafka:9092 # must be deployed independently
+ topic: "example" # optional
+```
+
+See [here](https://github.com/argoproj/argo-events/blob/develop/api/event-bus.md#kafkabus)
+for the full specification.
+
+## Topics
+
+The Kafka EventBus requires one event topic and two additional topics (trigger
+and action) per Sensor. These topics will not be created automatically unless
+the Kafka `auto.create.topics.enable` cluster configuration is set to true,
+otherwise it is your responsibility to create these topics. If a topic does
+not exist and cannot be automatically created, the EventSource and/or Sensor
+will exit with an error.
+
+By default the topics are named as follows.
+
+| topic | name |
+| ----- | ---- |
+| event | `{namespace}-{eventbus-name}` |
+| trigger | `{namespace}-{eventbus-name}-{sensor-name}-trigger` |
+| action | `{namespace}-{eventbus-name}-{sensor-name}-action` |
+
+If a topic name is specified in the EventBus specification, then the topics are
+named as follows.
+
+| topic | name |
+| ----- | ---- |
+| event | `{spec.kafka.topic}` |
+| trigger | `{spec.kafka.topic}-{sensor-name}-trigger` |
+| action | `{spec.kafka.topic}-{sensor-name}-action` |
+
+## Horizontal Scaling and Leader Election
+
+Sensors that use a Kafka EventBus can scale horizontally. Unlike NATS and
+Jetstream, specifying a replicas value greater than 1 will result in all
+Sensor pods actively processing events. However, an EventSource that uses a
+Kafka EventBus cannot be horizontally scaled and a
+[Kubernetes leader election](/eventsources/ha/#kubernetes-leader-election) is
+used.
diff --git a/docs/eventsources/ha.md b/docs/eventsources/ha.md
index 368b0308bc..7c5609f9fc 100644
--- a/docs/eventsources/ha.md
+++ b/docs/eventsources/ha.md
@@ -55,9 +55,10 @@ old one is gone.
## Kubernetes Leader Election
-By default, Argo Events will use NATS for the HA leader election. Alternatively,
-you can opt-in to a Kubernetes native leader election by specifying the following
-annotation.
+By default, Argo Events will use NATS for the HA leader election except when
+using a Kafka EventBus, in which case a Kubernetes leader election will be used.
+If using a different EventBus you can opt-in to a Kubernetes native leader
+election by specifying the following annotation.
```yaml
annotations:
events.argoproj.io/leader-election: k8s
diff --git a/docs/sensors/ha.md b/docs/sensors/ha.md
index 7e9519b3db..0e1a187174 100644
--- a/docs/sensors/ha.md
+++ b/docs/sensors/ha.md
@@ -11,9 +11,11 @@ behaviors!**
## Kubernetes Leader Election
-By default, Argo Events will use NATS for the HA leader election. Alternatively,
-you can opt-in to a Kubernetes native leader election by specifying the following
-annotation.
+By default, Argo Events will use NATS for the HA leader election except when
+using a Kafka EventBus, in which case a leader election is not required as a
+Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using
+a different EventBus you can opt-in to a Kubernetes native leader election by
+specifying the following annotation.
```yaml
annotations:
events.argoproj.io/leader-election: k8s
diff --git a/eventbus/common/interface.go b/eventbus/common/interface.go
index c2b8ddf272..524dc05e8e 100644
--- a/eventbus/common/interface.go
+++ b/eventbus/common/interface.go
@@ -42,5 +42,9 @@ type EventSourceDriver interface {
type SensorDriver interface {
Initialize() error
- Connect(triggerName string, dependencyExpression string, deps []Dependency) (TriggerConnection, error)
+ Connect(ctx context.Context,
+ triggerName string,
+ dependencyExpression string,
+ deps []Dependency,
+ atLeastOnce bool) (TriggerConnection, error)
}
diff --git a/eventbus/driver.go b/eventbus/driver.go
index 3344607c42..1277713916 100644
--- a/eventbus/driver.go
+++ b/eventbus/driver.go
@@ -13,6 +13,8 @@ import (
eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
jetstreamsource "github.com/argoproj/argo-events/eventbus/jetstream/eventsource"
jetstreamsensor "github.com/argoproj/argo-events/eventbus/jetstream/sensor"
+ kafkasource "github.com/argoproj/argo-events/eventbus/kafka/eventsource"
+ kafkasensor "github.com/argoproj/argo-events/eventbus/kafka/sensor"
stansource "github.com/argoproj/argo-events/eventbus/stan/eventsource"
stansensor "github.com/argoproj/argo-events/eventbus/stan/sensor"
eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
@@ -35,12 +37,12 @@ func GetEventSourceDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.B
var eventBusType apicommon.EventBusType
switch {
- case eventBusConfig.NATS != nil && eventBusConfig.JetStream != nil:
- return nil, fmt.Errorf("invalid event bus, NATS and Jetstream shouldn't both be specified")
case eventBusConfig.NATS != nil:
eventBusType = apicommon.EventBusNATS
case eventBusConfig.JetStream != nil:
eventBusType = apicommon.EventBusJetStream
+ case eventBusConfig.Kafka != nil:
+ eventBusType = apicommon.EventBusKafka
default:
return nil, fmt.Errorf("invalid event bus")
}
@@ -57,13 +59,15 @@ func GetEventSourceDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.B
if err != nil {
return nil, err
}
+ case apicommon.EventBusKafka:
+ dvr = kafkasource.NewKafkaSource(eventBusConfig.Kafka, logger)
default:
return nil, fmt.Errorf("invalid eventbus type")
}
return dvr, nil
}
-func GetSensorDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, sensorSpec *v1alpha1.Sensor) (eventbuscommon.SensorDriver, error) {
+func GetSensorDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, sensorSpec *v1alpha1.Sensor, hostname string) (eventbuscommon.SensorDriver, error) {
auth, err := GetAuth(ctx, eventBusConfig)
if err != nil {
return nil, err
@@ -79,12 +83,12 @@ func GetSensorDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusCon
var eventBusType apicommon.EventBusType
switch {
- case eventBusConfig.NATS != nil && eventBusConfig.JetStream != nil:
- return nil, fmt.Errorf("invalid event bus, NATS and Jetstream shouldn't both be specified")
case eventBusConfig.NATS != nil:
eventBusType = apicommon.EventBusNATS
case eventBusConfig.JetStream != nil:
eventBusType = apicommon.EventBusJetStream
+ case eventBusConfig.Kafka != nil:
+ eventBusType = apicommon.EventBusKafka
default:
return nil, fmt.Errorf("invalid event bus")
}
@@ -97,6 +101,9 @@ func GetSensorDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusCon
case apicommon.EventBusJetStream:
dvr, err = jetstreamsensor.NewSensorJetstream(eventBusConfig.JetStream.URL, sensorSpec, eventBusConfig.JetStream.StreamConfig, auth, logger) // don't need to pass in subject because subjects will be derived from dependencies
return dvr, err
+ case apicommon.EventBusKafka:
+ dvr = kafkasensor.NewKafkaSensor(eventBusConfig.Kafka, sensorSpec, hostname, logger)
+ return dvr, nil
default:
return nil, fmt.Errorf("invalid eventbus type")
}
@@ -111,6 +118,8 @@ func GetAuth(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig) (*e
eventBusAuth = eventBusConfig.NATS.Auth
case eventBusConfig.JetStream != nil:
eventBusAuth = &eventbusv1alpha1.AuthStrategyBasic
+ case eventBusConfig.Kafka != nil:
+ eventBusAuth = nil
default:
return nil, fmt.Errorf("invalid event bus")
}
diff --git a/eventbus/driver_test.go b/eventbus/driver_test.go
index 9a85a9f1ac..b07f8af682 100644
--- a/eventbus/driver_test.go
+++ b/eventbus/driver_test.go
@@ -15,6 +15,7 @@ const (
testSensorName = "sensor-xxxxx"
testEventSourceName = "es-xxxxx"
testSubject = "subj-xxxxx"
+ testHostname = "sensor-xxxxx-xxxxx"
)
var (
@@ -46,23 +47,23 @@ var (
func TestGetSensorDriver(t *testing.T) {
t.Run("get driver without eventbus", func(t *testing.T) {
- _, err := GetSensorDriver(context.Background(), testBadBusConfig, testValidSensorSpec)
+ _, err := GetSensorDriver(context.Background(), testBadBusConfig, testValidSensorSpec, testHostname)
assert.Error(t, err)
})
t.Run("get driver with none auth eventbus", func(t *testing.T) {
- driver, err := GetSensorDriver(context.Background(), testBusConfig, testValidSensorSpec)
+ driver, err := GetSensorDriver(context.Background(), testBusConfig, testValidSensorSpec, testHostname)
assert.NoError(t, err)
assert.NotNil(t, driver)
})
t.Run("get driver with invalid sensor spec", func(t *testing.T) {
- _, err := GetSensorDriver(context.Background(), testBusConfig, testNoNameSensorSpec)
+ _, err := GetSensorDriver(context.Background(), testBusConfig, testNoNameSensorSpec, testHostname)
assert.Error(t, err)
})
t.Run("get driver with nil sensor spec", func(t *testing.T) {
- _, err := GetSensorDriver(context.Background(), testBusConfig, nil)
+ _, err := GetSensorDriver(context.Background(), testBusConfig, nil, testHostname)
assert.Error(t, err)
})
}
diff --git a/eventbus/jetstream/sensor/sensor_jetstream.go b/eventbus/jetstream/sensor/sensor_jetstream.go
index 8a962e350d..921b318094 100644
--- a/eventbus/jetstream/sensor/sensor_jetstream.go
+++ b/eventbus/jetstream/sensor/sensor_jetstream.go
@@ -1,6 +1,7 @@
package sensor
import (
+ "context"
"fmt"
"strings"
"time"
@@ -75,7 +76,7 @@ func (stream *SensorJetstream) Initialize() error {
return err
}
-func (stream *SensorJetstream) Connect(triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency) (eventbuscommon.TriggerConnection, error) {
+func (stream *SensorJetstream) Connect(ctx context.Context, triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) {
conn, err := stream.MakeConnection()
if err != nil {
return nil, err
diff --git a/eventbus/kafka/base/kafka.go b/eventbus/kafka/base/kafka.go
new file mode 100644
index 0000000000..b39fcb8dd3
--- /dev/null
+++ b/eventbus/kafka/base/kafka.go
@@ -0,0 +1,112 @@
+package base
+
+import (
+ "strings"
+
+ "github.com/Shopify/sarama"
+ "github.com/argoproj/argo-events/common"
+ eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+ "go.uber.org/zap"
+)
+
+type Kafka struct {
+ Logger *zap.SugaredLogger
+ config *eventbusv1alpha1.KafkaBus
+}
+
+func NewKafka(config *eventbusv1alpha1.KafkaBus, logger *zap.SugaredLogger) *Kafka {
+ // set defaults
+ if config.ConsumerGroup == nil {
+ config.ConsumerGroup = &eventbusv1alpha1.KafkaConsumerGroup{}
+ }
+
+ return &Kafka{
+ Logger: logger,
+ config: config,
+ }
+}
+
+func (k *Kafka) Brokers() []string {
+ return strings.Split(k.config.URL, ",")
+}
+
+func (k *Kafka) Config() (*sarama.Config, error) {
+ config := sarama.NewConfig()
+
+ // consumer config
+ config.Consumer.IsolationLevel = sarama.ReadCommitted
+ config.Consumer.Offsets.AutoCommit.Enable = false
+
+ switch k.config.ConsumerGroup.StartOldest {
+ case true:
+ config.Consumer.Offsets.Initial = sarama.OffsetOldest
+ case false:
+ config.Consumer.Offsets.Initial = sarama.OffsetNewest
+ }
+
+ switch k.config.ConsumerGroup.RebalanceStrategy {
+ case "sticky":
+ config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategySticky}
+ case "roundrobin":
+ config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategyRoundRobin}
+ default:
+ config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategyRange}
+ }
+
+ // producer config
+ config.Producer.Idempotent = true
+ config.Producer.RequiredAcks = sarama.WaitForAll
+ config.Net.MaxOpenRequests = 1
+
+ // common config
+ if k.config.Version != "" {
+ version, err := sarama.ParseKafkaVersion(k.config.Version)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Version = version
+ }
+
+ // sasl
+ if k.config.SASL != nil {
+ config.Net.SASL.Enable = true
+ config.Net.SASL.Mechanism = sarama.SASLMechanism(k.config.SASL.GetMechanism())
+
+ switch config.Net.SASL.Mechanism {
+ case "SCRAM-SHA-512":
+ config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
+ return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA512New}
+ }
+ case "SCRAM-SHA-256":
+ config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
+ return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA256New}
+ }
+ }
+
+ user, err := common.GetSecretFromVolume(k.config.SASL.UserSecret)
+ if err != nil {
+ return nil, err
+ }
+ config.Net.SASL.User = user
+
+ password, err := common.GetSecretFromVolume(k.config.SASL.PasswordSecret)
+ if err != nil {
+ return nil, err
+ }
+ config.Net.SASL.Password = password
+ }
+
+ // tls
+ if k.config.TLS != nil {
+ tls, err := common.GetTLSConfig(k.config.TLS)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Net.TLS.Config = tls
+ config.Net.TLS.Enable = true
+ }
+
+ return config, nil
+}
diff --git a/eventbus/kafka/base/kafka_conn.go b/eventbus/kafka/base/kafka_conn.go
new file mode 100644
index 0000000000..ea3072dba7
--- /dev/null
+++ b/eventbus/kafka/base/kafka_conn.go
@@ -0,0 +1,13 @@
+package base
+
+import "go.uber.org/zap"
+
+type KafkaConnection struct {
+ Logger *zap.SugaredLogger
+}
+
+func NewKafkaConnection(logger *zap.SugaredLogger) *KafkaConnection {
+ return &KafkaConnection{
+ Logger: logger,
+ }
+}
diff --git a/eventbus/kafka/base/utils.go b/eventbus/kafka/base/utils.go
new file mode 100644
index 0000000000..e52490d51e
--- /dev/null
+++ b/eventbus/kafka/base/utils.go
@@ -0,0 +1,51 @@
+package base
+
+import (
+ "fmt"
+ "time"
+)
+
+// EventKey returns the kafka message key for an event: the event
+// source name and event name joined by a dot.
+func EventKey(source string, subject string) string {
+	return fmt.Sprint(source, ".", subject)
+}
+
+// Batch returns a read only channel that receives values from the
+// input channel batched together into a slice. A value is sent to
+// the output channel when the slice reaches n elements, or d time
+// has elapsed, whichever happens first. Ordering is maintained.
+//
+// The output channel is closed when the input channel is closed.
+// NOTE(review): a partially accumulated batch is dropped, not
+// flushed, when the input channel closes — confirm this is intended.
+func Batch[T any](n int, d time.Duration, in <-chan T) <-chan []T {
+	out := make(chan []T, 1)
+
+	go func() {
+		batch := []T{}
+		// the timer is started (Reset) only when the first item of a
+		// new batch arrives, and stopped whenever a batch is flushed
+		timer := time.NewTimer(d)
+		timer.Stop()
+
+		defer close(out)
+		defer timer.Stop()
+
+		for {
+			select {
+			case item, ok := <-in:
+				if !ok {
+					// input channel closed, stop batching
+					return
+				}
+				if len(batch) == 0 {
+					// first item of a new batch, start the clock
+					timer.Reset(d)
+				}
+				if batch = append(batch, item); len(batch) == n {
+					// size limit reached, flush
+					timer.Stop()
+					out <- batch
+					batch = nil
+				}
+			case <-timer.C:
+				// time limit reached, flush whatever accumulated;
+				// the guard also absorbs a stale fire left pending
+				// when Stop raced with an expiring timer
+				if len(batch) > 0 {
+					out <- batch
+					batch = nil
+				}
+			}
+		}
+	}()
+
+	return out
+}
diff --git a/eventbus/kafka/base/utils_test.go b/eventbus/kafka/base/utils_test.go
new file mode 100644
index 0000000000..be8359396b
--- /dev/null
+++ b/eventbus/kafka/base/utils_test.go
@@ -0,0 +1,82 @@
+package base
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestBatchDurationReached verifies that batches smaller than the max
+// size are flushed once the time limit elapses.
+// NOTE(review): relies on the real clock (~1s per case), so it is
+// timing sensitive on heavily loaded CI machines.
+func TestBatchDurationReached(t *testing.T) {
+	in := make(chan int)
+	defer close(in)
+
+	out := Batch(5, 1*time.Second, in)
+
+	t0 := time.Now()
+	in <- 0
+	assert.Equal(t, []int{0}, <-out)
+	assert.Equal(t, time.Second, time.Since(t0).Truncate(time.Second))
+
+	t1 := time.Now()
+	in <- 1
+	in <- 2
+	assert.Equal(t, []int{1, 2}, <-out)
+	assert.Equal(t, time.Second, time.Since(t1).Truncate(time.Second))
+
+	t2 := time.Now()
+	in <- 3
+	in <- 4
+	in <- 5
+	assert.Equal(t, []int{3, 4, 5}, <-out)
+	assert.Equal(t, time.Second, time.Since(t2).Truncate(time.Second))
+}
+
+// TestBatchSizeReached verifies that a batch is flushed immediately
+// (well before the 1s time limit) once it reaches the max size.
+func TestBatchSizeReached(t *testing.T) {
+	in := make(chan int)
+	defer close(in)
+
+	out := Batch(2, 1*time.Second, in)
+
+	t0 := time.Now()
+	in <- 0
+	in <- 1
+	assert.Equal(t, <-out, []int{0, 1})
+	assert.Equal(t, time.Duration(0), time.Since(t0).Truncate(time.Second))
+
+	t1 := time.Now()
+	in <- 2
+	in <- 3
+	in <- 4
+	in <- 5
+	assert.Equal(t, []int{2, 3}, <-out)
+	assert.Equal(t, []int{4, 5}, <-out)
+	assert.Equal(t, time.Duration(0), time.Since(t1).Truncate(time.Second))
+}
+
+// TestBatchMaintainsOrder verifies that items appear in the output
+// batch in the same order they were sent to the input channel.
+func TestBatchMaintainsOrder(t *testing.T) {
+	in := make(chan string)
+	defer close(in)
+
+	out := Batch(10, 1*time.Second, in)
+
+	in <- "a"
+	in <- "b"
+	in <- "c"
+	in <- "d"
+	in <- "e"
+	in <- "f"
+	in <- "g"
+	in <- "h"
+	in <- "i"
+	in <- "j"
+	assert.Equal(t, []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}, <-out)
+}
+
+// TestBatchChannelCleanedUp verifies that closing the input channel
+// closes the output channel (a receive yields the nil zero value).
+func TestBatchChannelCleanedUp(t *testing.T) {
+	in := make(chan string)
+	out := Batch(10, 1*time.Second, in)
+
+	close(in)
+	assert.Equal(t, []string(nil), <-out)
+}
diff --git a/eventbus/kafka/eventsource/source_conn.go b/eventbus/kafka/eventsource/source_conn.go
new file mode 100644
index 0000000000..0463990482
--- /dev/null
+++ b/eventbus/kafka/eventsource/source_conn.go
@@ -0,0 +1,55 @@
+package eventsource
+
+import (
+ "context"
+
+ "github.com/Shopify/sarama"
+ "github.com/argoproj/argo-events/eventbus/common"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ "go.uber.org/zap"
+)
+
+// KafkaSourceConnection is an event source connection that publishes
+// events to a single kafka topic via a synchronous producer.
+type KafkaSourceConnection struct {
+	*base.KafkaConnection
+	Topic    string
+	Client   sarama.Client
+	Producer sarama.SyncProducer
+}
+
+// Publish sends one message to the connection's topic, keyed by event
+// source name and event name so related events share a partition. It
+// blocks until the broker acks (the producer is synchronous) and logs
+// the partition/offset on success. If the topic does not exist the
+// process exits via Fatalf rather than retrying.
+func (c *KafkaSourceConnection) Publish(ctx context.Context, msg common.Message) error {
+	key := base.EventKey(msg.EventSourceName, msg.EventName)
+	partition, offset, err := c.Producer.SendMessage(&sarama.ProducerMessage{
+		Topic: c.Topic,
+		Key:   sarama.StringEncoder(key),
+		Value: sarama.ByteEncoder(msg.Body),
+	})
+
+	if err != nil {
+		// fail fast if topic does not exist
+		if err == sarama.ErrUnknownTopicOrPartition {
+			c.Logger.Fatalf(
+				"Topic does not exist. Please ensure the topic '%s' has been created, or the kafka setting '%s' is set to true.",
+				c.Topic,
+				"auto.create.topics.enable",
+			)
+		}
+
+		return err
+	}
+
+	c.Logger.Infow("Published message to kafka", zap.String("topic", c.Topic), zap.String("key", key), zap.Int32("partition", partition), zap.Int64("offset", offset))
+
+	return nil
+}
+
+// Close shuts down the producer first and then the underlying client.
+func (c *KafkaSourceConnection) Close() error {
+	if err := c.Producer.Close(); err != nil {
+		return err
+	}
+
+	return c.Client.Close()
+}
+
+// IsClosed reports whether the underlying kafka client is closed.
+func (c *KafkaSourceConnection) IsClosed() bool {
+	return c.Client.Closed()
+}
diff --git a/eventbus/kafka/eventsource/source_kafka.go b/eventbus/kafka/eventsource/source_kafka.go
new file mode 100644
index 0000000000..d73e0d193f
--- /dev/null
+++ b/eventbus/kafka/eventsource/source_kafka.go
@@ -0,0 +1,55 @@
+package eventsource
+
+import (
+ "github.com/Shopify/sarama"
+ eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+ "go.uber.org/zap"
+)
+
+// KafkaSource implements the event source side of the kafka event
+// bus, publishing events to the configured topic.
+type KafkaSource struct {
+	*base.Kafka
+	topic string
+}
+
+// NewKafkaSource returns a KafkaSource for the given bus config.
+func NewKafkaSource(config *eventbusv1alpha1.KafkaBus, logger *zap.SugaredLogger) *KafkaSource {
+	return &KafkaSource{
+		Kafka: base.NewKafka(config, logger),
+		topic: config.Topic,
+	}
+}
+
+// Initialize is a no-op for kafka; connections are created lazily in
+// Connect.
+func (s *KafkaSource) Initialize() error {
+	return nil
+}
+
+// Connect creates a kafka client and a synchronous producer and wraps
+// them in an EventSourceConnection. The string argument (a client id
+// in other drivers) is unused for kafka.
+func (s *KafkaSource) Connect(string) (eventbuscommon.EventSourceConnection, error) {
+	config, err := s.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	// eventsource specific config, both must be true for a
+	// sarama SyncProducer
+	config.Producer.Return.Errors = true
+	config.Producer.Return.Successes = true
+
+	client, err := sarama.NewClient(s.Brokers(), config)
+	if err != nil {
+		return nil, err
+	}
+
+	producer, err := sarama.NewSyncProducerFromClient(client)
+	if err != nil {
+		// close the client so a failed connection attempt does not
+		// leak its broker connections
+		if closeErr := client.Close(); closeErr != nil {
+			s.Logger.Errorw("Failed to close kafka client", zap.Error(closeErr))
+		}
+		return nil, err
+	}
+
+	conn := &KafkaSourceConnection{
+		KafkaConnection: base.NewKafkaConnection(s.Logger),
+		Topic:           s.topic,
+		Client:          client,
+		Producer:        producer,
+	}
+
+	return conn, nil
+}
diff --git a/eventbus/kafka/sensor/kafka_handler.go b/eventbus/kafka/sensor/kafka_handler.go
new file mode 100644
index 0000000000..2c5f25a14d
--- /dev/null
+++ b/eventbus/kafka/sensor/kafka_handler.go
@@ -0,0 +1,224 @@
+package kafka
+
+import (
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/Shopify/sarama"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ "go.uber.org/zap"
+)
+
+// KafkaHandler implements sarama.ConsumerGroupHandler for the sensor,
+// consuming the event, trigger, and action topics and committing work
+// in kafka transactions. The embedded mutex serializes transactions
+// against Close.
+type KafkaHandler struct {
+	*sync.Mutex
+	Logger *zap.SugaredLogger
+
+	// kafka details
+	GroupName     string
+	Producer      sarama.AsyncProducer
+	OffsetManager sarama.OffsetManager
+	TriggerTopic  string
+
+	// handler functions
+	// one function for each consumed topic, return messages, an
+	// offset and an optional function that will in a transaction
+	Handlers map[string]func(*sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func())
+
+	// maintains a mapping of keys (which correspond to triggers)
+	// to offsets, used to ensure triggers aren't invoked twice
+	checkpoints Checkpoints
+}
+
+// Checkpoints maps topic -> partition -> checkpoint state.
+type Checkpoints map[string]map[int32]*Checkpoint
+
+// Checkpoint tracks, per trigger key, the next offset at which that
+// trigger may fire again; it is serialized into the consumer group
+// offset metadata.
+type Checkpoint struct {
+	Logger  *zap.SugaredLogger
+	Init    bool
+	Offsets map[string]int64
+}
+
+// Skip reports whether a message at the given offset for the given
+// trigger key has already been handled and must not be re-invoked.
+func (c *Checkpoint) Skip(key string, offset int64) bool {
+	if c.Offsets == nil {
+		return false
+	}
+	return offset < c.Offsets[key]
+}
+
+// Set records the next actionable offset for the trigger key,
+// lazily allocating the offsets map.
+func (c *Checkpoint) Set(key string, offset int64) {
+	if c.Offsets == nil {
+		c.Offsets = map[string]int64{}
+	}
+	c.Offsets[key] = offset
+}
+
+// Metadata serializes the checkpoint offsets to JSON for storage in
+// the consumer group offset metadata; returns "" when empty or on a
+// serialization error (which is logged).
+func (c *Checkpoint) Metadata() string {
+	if c.Offsets == nil {
+		return ""
+	}
+
+	metadata, err := json.Marshal(c.Offsets)
+	if err != nil {
+		c.Logger.Errorw("Failed to serialize metadata", err)
+		return ""
+	}
+
+	return string(metadata)
+}
+
+// Setup runs at the start of a consumer group session. It rebuilds
+// the in-memory checkpoints for every claimed topic/partition from
+// the offset metadata previously committed by KafkaTransaction, so
+// triggers are not re-invoked after a restart or rebalance.
+func (h *KafkaHandler) Setup(session sarama.ConsumerGroupSession) error {
+	// instantiates checkpoints for all topic/partitions managed by
+	// this claim
+	h.checkpoints = Checkpoints{}
+
+	for topic, partitions := range session.Claims() {
+		h.checkpoints[topic] = map[int32]*Checkpoint{}
+
+		for _, partition := range partitions {
+			partitionOffsetManager, err := h.OffsetManager.ManagePartition(topic, partition)
+			if err != nil {
+				return err
+			}
+
+			func() {
+				var offsets map[string]int64
+
+				defer partitionOffsetManager.AsyncClose()
+				offset, metadata := partitionOffsetManager.NextOffset()
+
+				// only need to manage the offsets for each trigger
+				// with respect to the trigger topic
+				if topic == h.TriggerTopic && metadata != "" {
+					if err := json.Unmarshal([]byte(metadata), &offsets); err != nil {
+						// if metadata is invalid json, it will be
+						// reset to an empty map
+						h.Logger.Errorw("Failed to deserialize metadata, resetting", err)
+					}
+				}
+
+				h.checkpoints[topic][partition] = &Checkpoint{
+					Logger:  h.Logger,
+					Init:    offset == -1, // mark offset when first message consumed
+					Offsets: offsets,
+				}
+			}()
+
+			h.OffsetManager.Commit()
+			if err := partitionOffsetManager.Close(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// Cleanup runs at the end of a session; no teardown is required as
+// checkpoints are rebuilt on the next Setup.
+func (h *KafkaHandler) Cleanup(session sarama.ConsumerGroupSession) error {
+	return nil
+}
+
+// ConsumeClaim consumes one topic/partition claim. Messages are
+// batched, dispatched to the per-topic handler, and the resulting
+// producer messages plus offset/metadata are committed atomically in
+// a kafka transaction. Action functions returned by handlers are
+// invoked asynchronously after the transaction commits.
+func (h *KafkaHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	handler, ok := h.Handlers[claim.Topic()]
+	if !ok {
+		return fmt.Errorf("unrecognized topic %s", claim.Topic())
+	}
+
+	checkpoint, ok := h.checkpoints[claim.Topic()][claim.Partition()]
+	if !ok {
+		return fmt.Errorf("unrecognized topic %s or partition %d", claim.Topic(), claim.Partition())
+	}
+
+	// Batch messages from the claim message channel. A message will
+	// be produced to the batched channel if the max batch size is
+	// reached or the time limit has elapsed, whichever happens
+	// first. Batching helps optimize kafka transactions.
+	batch := base.Batch(100, 1*time.Second, claim.Messages())
+
+	for {
+		select {
+		case msgs, ok := <-batch:
+			if !ok {
+				// the claim's message channel was closed (rebalance
+				// or shutdown), which closes the batch channel;
+				// return instead of busy-looping on the closed
+				// channel until the session context is cancelled
+				return nil
+			}
+			if len(msgs) == 0 {
+				// defensive: Batch never sends an empty slice
+				h.Logger.Warn("Kafka batch contains no messages")
+				continue
+			}
+
+			// snapshot the reset point before processing so a failed
+			// transaction can rewind to the start of this batch
+			transaction := &KafkaTransaction{
+				Logger:        h.Logger,
+				Producer:      h.Producer,
+				GroupName:     h.GroupName,
+				Topic:         claim.Topic(),
+				Partition:     claim.Partition(),
+				ResetOffset:   msgs[0].Offset,
+				ResetMetadata: checkpoint.Metadata(),
+			}
+
+			var messages []*sarama.ProducerMessage
+			var offset int64
+			var fns []func()
+
+			for _, msg := range msgs {
+				h.Logger.Infow("Received message",
+					zap.String("topic", msg.Topic),
+					zap.Int32("partition", msg.Partition),
+					zap.Int64("offset", msg.Offset))
+
+				key := string(msg.Key)
+
+				if checkpoint.Init {
+					// mark offset in order to reconsume from this
+					// offset if a restart occurs
+					session.MarkOffset(msg.Topic, msg.Partition, msg.Offset, "")
+					session.Commit()
+					checkpoint.Init = false
+				}
+
+				if checkpoint.Skip(key, msg.Offset) {
+					h.Logger.Infof("Skipping trigger '%s' (%d<%d)", key, msg.Offset, checkpoint.Offsets[key])
+					continue
+				}
+
+				m, o, f := handler(msg)
+				if msg.Topic == h.TriggerTopic && len(m) > 0 {
+					// when a trigger is invoked (there is a message)
+					// update the checkpoint to ensure the trigger
+					// is not re-invoked in the case of a restart
+					checkpoint.Set(key, msg.Offset+1)
+				}
+
+				// update transaction information
+				messages = append(messages, m...)
+				offset = o
+				if f != nil {
+					fns = append(fns, f)
+				}
+			}
+
+			// serialize the transaction against Close
+			func() {
+				h.Lock()
+				defer h.Unlock()
+				if err := transaction.Commit(session, messages, offset, checkpoint.Metadata()); err != nil {
+					h.Logger.Errorw("Transaction error", zap.Error(err))
+				}
+			}()
+
+			// invoke (action) functions asynchronously
+			for _, fn := range fns {
+				go fn()
+			}
+		case <-session.Context().Done():
+			return nil
+		}
+	}
+}
+
+// Close shuts down the offset manager and producer. The lock ensures
+// no transaction is in flight while closing.
+func (h *KafkaHandler) Close() error {
+	h.Lock()
+	defer h.Unlock()
+
+	if err := h.OffsetManager.Close(); err != nil {
+		return err
+	}
+
+	return h.Producer.Close()
+}
diff --git a/eventbus/kafka/sensor/kafka_sensor.go b/eventbus/kafka/sensor/kafka_sensor.go
new file mode 100644
index 0000000000..38f1840678
--- /dev/null
+++ b/eventbus/kafka/sensor/kafka_sensor.go
@@ -0,0 +1,369 @@
+package kafka
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Knetic/govaluate"
+ "github.com/Shopify/sarama"
+ eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+ sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+ "go.uber.org/zap"
+)
+
+// KafkaSensor implements the sensor side of the kafka event bus. One
+// consumer group session is shared across all triggers of the sensor;
+// the embedded mutex guards connection state.
+type KafkaSensor struct {
+	*base.Kafka
+	*sync.Mutex
+	sensor *sensorv1alpha1.Sensor
+
+	// kafka details
+	topics    *Topics
+	client    sarama.Client
+	consumer  sarama.ConsumerGroup
+	hostname  string
+	groupName string
+
+	// triggers handlers
+	// holds the state of all sensor triggers
+	triggers Triggers
+
+	// kafka handler
+	// handles consuming from kafka, offsets, and transactions
+	kafkaHandler *KafkaHandler
+	connected    bool
+}
+
+// NewKafkaSensor returns a KafkaSensor for the given bus config and
+// sensor. Topic names for the trigger and action topics are derived
+// from the bus topic and sensor name; the consumer group name falls
+// back to "<namespace>-<name>" when not configured.
+func NewKafkaSensor(kafkaConfig *eventbusv1alpha1.KafkaBus, sensor *sensorv1alpha1.Sensor, hostname string, logger *zap.SugaredLogger) *KafkaSensor {
+	topics := &Topics{
+		event:   kafkaConfig.Topic,
+		trigger: fmt.Sprintf("%s-%s-%s", kafkaConfig.Topic, sensor.Name, "trigger"),
+		action:  fmt.Sprintf("%s-%s-%s", kafkaConfig.Topic, sensor.Name, "action"),
+	}
+
+	var groupName string
+	if kafkaConfig.ConsumerGroup == nil || kafkaConfig.ConsumerGroup.GroupName == "" {
+		groupName = fmt.Sprintf("%s-%s", sensor.Namespace, sensor.Name)
+	} else {
+		groupName = kafkaConfig.ConsumerGroup.GroupName
+	}
+
+	return &KafkaSensor{
+		Kafka:     base.NewKafka(kafkaConfig, logger),
+		Mutex:     &sync.Mutex{},
+		sensor:    sensor,
+		topics:    topics,
+		hostname:  hostname,
+		groupName: groupName,
+		triggers:  Triggers{},
+	}
+}
+
+// Topics names the three topics the sensor consumes.
+type Topics struct {
+	event   string
+	trigger string
+	action  string
+}
+
+// List returns the topics in event, trigger, action order.
+func (t *Topics) List() []string {
+	return []string{t.event, t.trigger, t.action}
+}
+
+// Triggers maps trigger name to its handler/connection.
+type Triggers map[string]KafkaTriggerHandler
+
+// TriggerWithDepName pairs a trigger handler with the name of the
+// dependency a particular event satisfies.
+type TriggerWithDepName struct {
+	KafkaTriggerHandler
+	depName string
+}
+
+// List returns the triggers that depend on the given event, each
+// annotated with the matching dependency name.
+func (t Triggers) List(event *cloudevents.Event) []*TriggerWithDepName {
+	triggers := []*TriggerWithDepName{}
+
+	for _, trigger := range t {
+		if depName, ok := trigger.DependsOn(event); ok {
+			triggers = append(triggers, &TriggerWithDepName{trigger, depName})
+		}
+	}
+
+	return triggers
+}
+
+// Ready reports whether every registered trigger has subscribed
+// (received its transform/filter/action functions).
+func (t Triggers) Ready() bool {
+	for _, trigger := range t {
+		if !trigger.Ready() {
+			return false
+		}
+	}
+	return true
+}
+
+// Initialize creates the kafka client, consumer group, transactional
+// producer, and offset manager, and wires them into the KafkaHandler.
+// The transactional producer id is the pod hostname, giving each
+// sensor replica its own transaction id.
+func (s *KafkaSensor) Initialize() error {
+	config, err := s.Config()
+	if err != nil {
+		return err
+	}
+
+	// sensor specific config
+	config.Producer.Transaction.ID = s.hostname
+
+	client, err := sarama.NewClient(s.Brokers(), config)
+	if err != nil {
+		return err
+	}
+
+	consumer, err := sarama.NewConsumerGroupFromClient(s.groupName, client)
+	if err != nil {
+		return err
+	}
+
+	producer, err := sarama.NewAsyncProducerFromClient(client)
+	if err != nil {
+		return err
+	}
+
+	offsetManager, err := sarama.NewOffsetManagerFromClient(s.groupName, client)
+	if err != nil {
+		return err
+	}
+
+	s.client = client
+	s.consumer = consumer
+	s.kafkaHandler = &KafkaHandler{
+		Mutex:         &sync.Mutex{},
+		Logger:        s.Logger,
+		GroupName:     s.groupName,
+		Producer:      producer,
+		OffsetManager: offsetManager,
+		TriggerTopic:  s.topics.trigger,
+		// one handler per consumed topic
+		Handlers: map[string]func(*sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()){
+			s.topics.event:   s.Event,
+			s.topics.trigger: s.Trigger,
+			s.topics.action:  s.Action,
+		},
+	}
+
+	return nil
+}
+
+// Connect registers a trigger with the sensor and returns its
+// connection, starting the shared Listen goroutine on first use. The
+// dependency expression is parsed with govaluate; dashes are escaped
+// because govaluate would otherwise read them as subtraction.
+func (s *KafkaSensor) Connect(ctx context.Context, triggerName string, depExpression string, dependencies []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) {
+	s.Lock()
+	defer s.Unlock()
+
+	// connect only if disconnected, if ever the connection is lost
+	// the connected boolean will flip and the sensor listener will
+	// attempt to reconnect by invoking this function again
+	if !s.connected {
+		go s.Listen(ctx)
+		s.connected = true
+	}
+
+	if _, ok := s.triggers[triggerName]; !ok {
+		expr, err := govaluate.NewEvaluableExpression(strings.ReplaceAll(depExpression, "-", "\\-"))
+		if err != nil {
+			return nil, err
+		}
+
+		// index dependencies by event key for O(1) lookup in DependsOn
+		depMap := map[string]eventbuscommon.Dependency{}
+		for _, dep := range dependencies {
+			depMap[base.EventKey(dep.EventSourceName, dep.EventName)] = dep
+		}
+
+		s.triggers[triggerName] = &KafkaTriggerConnection{
+			KafkaConnection: base.NewKafkaConnection(s.Logger),
+			sensorName:      s.sensor.Name,
+			triggerName:     triggerName,
+			depExpression:   expr,
+			dependencies:    depMap,
+			atLeastOnce:     atLeastOnce,
+			close:           s.Close,
+			isClosed:        s.IsClosed,
+		}
+	}
+
+	return s.triggers[triggerName], nil
+}
+
+// Listen runs the consumer group loop. It waits until every trigger
+// has connected and subscribed, then consumes the event, trigger, and
+// action topics until the context is cancelled or an error occurs,
+// flipping the connected flag on exit so Connect can restart it.
+func (s *KafkaSensor) Listen(ctx context.Context) {
+	defer s.Disconnect()
+
+	for {
+		if len(s.triggers) != len(s.sensor.Spec.Triggers) || !s.triggers.Ready() {
+			s.Logger.Info("Not ready to consume, waiting...")
+			time.Sleep(3 * time.Second)
+			continue
+		}
+
+		s.Logger.Infow("Consuming", zap.Strings("topics", s.topics.List()), zap.String("group", s.groupName))
+
+		// Consume blocks for the lifetime of a session and returns
+		// on rebalance or error; loop to rejoin the group
+		if err := s.consumer.Consume(ctx, s.topics.List(), s.kafkaHandler); err != nil {
+			// fail fast if topics do not exist
+			if err == sarama.ErrUnknownTopicOrPartition {
+				s.Logger.Fatalf(
+					"Topics do not exist. Please ensure the topics '%s' have been created, or the kafka setting '%s' is set to true.",
+					s.topics.List(),
+					"auto.create.topics.enable",
+				)
+			}
+
+			s.Logger.Errorw("Failed to consume", zap.Error(err))
+			return
+		}
+
+		if err := ctx.Err(); err != nil {
+			s.Logger.Errorw("Kafka error", zap.Error(err))
+			return
+		}
+	}
+}
+
+// Disconnect marks the sensor as disconnected so the next Connect
+// call restarts the Listen loop.
+func (s *KafkaSensor) Disconnect() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.connected = false
+}
+
+// Close tears down the consumer group, handler (producer and offset
+// manager), and client. Safe to call multiple times.
+func (s *KafkaSensor) Close() error {
+	s.Lock()
+	defer s.Unlock()
+
+	// protect against being called multiple times
+	if s.IsClosed() {
+		return nil
+	}
+
+	if err := s.consumer.Close(); err != nil {
+		return err
+	}
+
+	if err := s.kafkaHandler.Close(); err != nil {
+		return err
+	}
+
+	return s.client.Close()
+}
+
+// IsClosed reports whether the sensor is disconnected or its kafka
+// client has been closed.
+func (s *KafkaSensor) IsClosed() bool {
+	return !s.connected || s.client.Closed()
+}
+
+// Event handles a message from the event topic. For every trigger
+// that depends on the event it transforms and filters the event, then
+// routes it either straight to the action topic (single-dependency
+// triggers) or to the trigger topic for condition evaluation.
+func (s *KafkaSensor) Event(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) {
+	var event *cloudevents.Event
+	if err := json.Unmarshal(msg.Value, &event); err != nil {
+		s.Logger.Errorw("Failed to deserialize cloudevent, skipping", zap.Error(err))
+		return nil, msg.Offset + 1, nil
+	}
+
+	messages := []*sarama.ProducerMessage{}
+	for _, trigger := range s.triggers.List(event) {
+		// deliberately shadows the outer event with the
+		// trigger-specific transformed copy
+		event, err := trigger.Transform(trigger.depName, event)
+		if err != nil {
+			s.Logger.Errorw("Failed to transform cloudevent, skipping", zap.Error(err))
+			continue
+		}
+
+		if !trigger.Filter(trigger.depName, event) {
+			s.Logger.Debug("Filter condition not satisfied, skipping")
+			continue
+		}
+
+		// if the trigger only requires one message to be invoked we
+		// can skip ahead to the action topic, otherwise produce to
+		// the trigger topic
+
+		var data any
+		var topic string
+		if trigger.OneAndDone() {
+			data = []*cloudevents.Event{event}
+			topic = s.topics.action
+		} else {
+			data = event
+			topic = s.topics.trigger
+		}
+
+		value, err := json.Marshal(data)
+		if err != nil {
+			s.Logger.Errorw("Failed to serialize cloudevent, skipping", zap.Error(err))
+			continue
+		}
+
+		messages = append(messages, &sarama.ProducerMessage{
+			Topic: topic,
+			Key:   sarama.StringEncoder(trigger.Name()),
+			Value: sarama.ByteEncoder(value),
+		})
+	}
+
+	return messages, msg.Offset + 1, nil
+}
+
+// Trigger handles a message from the trigger topic. It feeds the
+// event into the named trigger's condition state; if the dependency
+// expression becomes satisfied, the accumulated events are produced
+// to the action topic. The returned offset is the minimum over all
+// triggers' pending events on this partition, so unsatisfied events
+// are re-consumed after a restart.
+func (s *KafkaSensor) Trigger(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) {
+	var event *cloudevents.Event
+	if err := json.Unmarshal(msg.Value, &event); err != nil {
+		// do not return here as we still need to call trigger.Offset
+		// below to determine current offset
+		s.Logger.Errorw("Failed to deserialize cloudevent, skipping", zap.Error(err))
+	}
+
+	messages := []*sarama.ProducerMessage{}
+	offset := msg.Offset + 1
+
+	// update trigger with new event and add any resulting action to
+	// transaction messages
+	if trigger, ok := s.triggers[string(msg.Key)]; ok && event != nil {
+		func() {
+			events, err := trigger.Update(event, msg.Partition, msg.Offset, msg.Timestamp)
+			if err != nil {
+				s.Logger.Errorw("Failed to update trigger, skipping", zap.Error(err))
+				return
+			}
+
+			// no events, trigger not yet satisfied
+			if events == nil {
+				return
+			}
+
+			value, err := json.Marshal(events)
+			if err != nil {
+				s.Logger.Errorw("Failed to serialize cloudevent, skipping", zap.Error(err))
+				return
+			}
+
+			messages = append(messages, &sarama.ProducerMessage{
+				Topic: s.topics.action,
+				Key:   sarama.StringEncoder(trigger.Name()),
+				Value: sarama.ByteEncoder(value),
+			})
+		}()
+	}
+
+	// need to determine smallest possible offset against all
+	// triggers as other triggers may have messages that land on the
+	// same partition
+	for _, trigger := range s.triggers {
+		offset = trigger.Offset(msg.Partition, offset)
+	}
+
+	return messages, offset, nil
+}
+
+// Action handles a message from the action topic, invoking (or
+// deferring, see KafkaTriggerConnection.Action) the named trigger's
+// action with the deserialized events.
+func (s *KafkaSensor) Action(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) {
+	var events []*cloudevents.Event
+	if err := json.Unmarshal(msg.Value, &events); err != nil {
+		s.Logger.Errorw("Failed to deserialize cloudevents, skipping", zap.Error(err))
+		return nil, msg.Offset + 1, nil
+	}
+
+	var f func()
+	if trigger, ok := s.triggers[string(msg.Key)]; ok {
+		f = trigger.Action(events)
+	}
+
+	return nil, msg.Offset + 1, f
+}
diff --git a/eventbus/kafka/sensor/kafka_transaction.go b/eventbus/kafka/sensor/kafka_transaction.go
new file mode 100644
index 0000000000..8bb42ded98
--- /dev/null
+++ b/eventbus/kafka/sensor/kafka_transaction.go
@@ -0,0 +1,99 @@
+package kafka
+
+import (
+ "github.com/Shopify/sarama"
+ "go.uber.org/zap"
+)
+
+// KafkaTransaction commits a batch of produced messages together with
+// the consumer offset/metadata in a single kafka transaction for one
+// topic/partition.
+type KafkaTransaction struct {
+	Logger *zap.SugaredLogger
+
+	// kafka details
+	Producer  sarama.AsyncProducer
+	GroupName string
+	Topic     string
+	Partition int32
+
+	// used to reset the offset and metadata if transaction fails
+	ResetOffset   int64
+	ResetMetadata string
+}
+
+// Commit atomically produces the messages and commits the consumer
+// offset/metadata. With no messages it simply marks and commits the
+// offset. Transaction errors are not returned: handleTxnError either
+// retries until success or terminates the process via Fatal, so
+// reaching the end implies the transaction went through.
+func (t *KafkaTransaction) Commit(session sarama.ConsumerGroupSession, messages []*sarama.ProducerMessage, offset int64, metadata string) error {
+	// No need for a transaction if no messages, just update the
+	// offset and metadata
+	if len(messages) == 0 {
+		session.MarkOffset(t.Topic, t.Partition, offset, metadata)
+		session.Commit()
+		return nil
+	}
+
+	t.Logger.Infow("Begin transaction",
+		zap.String("topic", t.Topic),
+		zap.Int32("partition", t.Partition),
+		zap.Int("messages", len(messages)))
+
+	if err := t.Producer.BeginTxn(); err != nil {
+		return err
+	}
+
+	for _, msg := range messages {
+		t.Producer.Input() <- msg
+	}
+
+	// the consumed offset is committed as part of the transaction
+	offsets := map[string][]*sarama.PartitionOffsetMetadata{
+		t.Topic: {{
+			Partition: t.Partition,
+			Offset:    offset,
+			Metadata:  &metadata,
+		}},
+	}
+
+	if err := t.Producer.AddOffsetsToTxn(offsets, t.GroupName); err != nil {
+		t.Logger.Errorw("Kafka transaction error", zap.Error(err))
+		t.handleTxnError(session, func() error {
+			return t.Producer.AddOffsetsToTxn(offsets, t.GroupName)
+		})
+	}
+
+	if err := t.Producer.CommitTxn(); err != nil {
+		t.Logger.Errorw("Kafka transaction error", zap.Error(err))
+		t.handleTxnError(session, func() error {
+			return t.Producer.CommitTxn()
+		})
+	}
+
+	t.Logger.Infow("Finished transaction",
+		zap.String("topic", t.Topic),
+		zap.Int32("partition", t.Partition))
+
+	return nil
+}
+
+// handleTxnError recovers from a failed transaction operation. On a
+// fatal or abortable producer state it rewinds the consumer offset to
+// the start of the batch and terminates the process (Fatal) so the
+// batch is re-consumed on restart; otherwise it retries the supplied
+// operation until it succeeds.
+func (t *KafkaTransaction) handleTxnError(session sarama.ConsumerGroupSession, defaulthandler func() error) {
+	for {
+		if t.Producer.TxnStatus()&sarama.ProducerTxnFlagFatalError != 0 {
+			// reset current consumer offset to retry consume this record
+			session.ResetOffset(t.Topic, t.Partition, t.ResetOffset, t.ResetMetadata)
+			// fatal error, need to restart
+			t.Logger.Fatal("Message consumer: t.Producer is in a fatal state.")
+			return
+		}
+		if t.Producer.TxnStatus()&sarama.ProducerTxnFlagAbortableError != 0 {
+			if err := t.Producer.AbortTxn(); err != nil {
+				t.Logger.Errorw("Message consumer: unable to abort transaction.", zap.Error(err))
+				continue
+			}
+			// reset current consumer offset to retry consume this record
+			session.ResetOffset(t.Topic, t.Partition, t.ResetOffset, t.ResetMetadata)
+			// fatal error, need to restart
+			t.Logger.Fatal("Message consumer: t.Producer is in a fatal state, aborted transaction.")
+			return
+		}
+
+		// attempt retry
+		if err := defaulthandler(); err == nil {
+			return
+		}
+	}
+}
diff --git a/eventbus/kafka/sensor/trigger_conn.go b/eventbus/kafka/sensor/trigger_conn.go
new file mode 100644
index 0000000000..149ea4cb83
--- /dev/null
+++ b/eventbus/kafka/sensor/trigger_conn.go
@@ -0,0 +1,91 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/Knetic/govaluate"
+ "github.com/argoproj/argo-events/eventbus/common"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+// KafkaTriggerConnection holds per-trigger state for the kafka sensor:
+// the parsed dependency expression, the dependencies it can match,
+// and the transform/filter/action callbacks installed by Subscribe.
+// Close/IsClosed delegate to the owning KafkaSensor since the kafka
+// connection is shared across all of the sensor's triggers.
+type KafkaTriggerConnection struct {
+	*base.KafkaConnection
+	KafkaTriggerHandler
+
+	sensorName    string
+	triggerName   string
+	depExpression *govaluate.EvaluableExpression
+	dependencies  map[string]common.Dependency
+	atLeastOnce   bool
+
+	// functions
+	close     func() error
+	isClosed  func() bool
+	transform func(string, cloudevents.Event) (*cloudevents.Event, error)
+	filter    func(string, cloudevents.Event) bool
+	action    func(map[string]cloudevents.Event)
+
+	// state
+	events        []*eventWithMetadata
+	lastResetTime time.Time
+}
+
+// eventWithMetadata pairs a received cloudevent with the kafka
+// partition/offset/timestamp it was consumed at.
+type eventWithMetadata struct {
+	*cloudevents.Event
+	partition int32
+	offset    int64
+	timestamp time.Time
+}
+
+// Same reports whether two events refer to the same dependency
+// (same cloudevent source and subject).
+func (e1 *eventWithMetadata) Same(e2 *eventWithMetadata) bool {
+	return e1.Source() == e2.Source() && e1.Subject() == e2.Subject()
+}
+
+// After reports whether the event was consumed after time t; a zero t
+// (no reset yet) matches all events.
+func (e *eventWithMetadata) After(t time.Time) bool {
+	return t.IsZero() || e.timestamp.After(t)
+}
+
+// String identifies the connection in logs.
+func (c *KafkaTriggerConnection) String() string {
+	return fmt.Sprintf("KafkaTriggerConnection{Sensor:%s,Trigger:%s}", c.sensorName, c.triggerName)
+}
+
+// Close closes the sensor-wide kafka connection.
+func (c *KafkaTriggerConnection) Close() error {
+	return c.close()
+}
+
+// IsClosed reports whether the sensor-wide connection is closed.
+func (c *KafkaTriggerConnection) IsClosed() bool {
+	return c.isClosed()
+}
+
+// Subscribe installs the trigger's transform, filter, and action
+// callbacks (which marks the trigger Ready) and then blocks, reacting
+// to context cancellation and condition-reset signals. Unlike other
+// drivers it does not itself consume messages; consumption happens in
+// the sensor-wide Listen loop.
+func (c *KafkaTriggerConnection) Subscribe(
+	ctx context.Context,
+	closeCh <-chan struct{},
+	resetConditionsCh <-chan struct{},
+	lastResetTime time.Time,
+	transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error),
+	filter func(string, cloudevents.Event) bool,
+	action func(map[string]cloudevents.Event),
+	topic *string) error {
+	c.transform = transform
+	c.filter = filter
+	c.action = action
+	c.lastResetTime = lastResetTime
+
+	for {
+		select {
+		case <-ctx.Done():
+			return c.Close()
+		case <-closeCh:
+			// this is a noop since a kafka connection is maintained
+			// on the overall sensor vs individual triggers
+			return nil
+		case <-resetConditionsCh:
+			// trigger update will filter out all events that occurred
+			// before this time
+			c.lastResetTime = time.Now()
+		}
+	}
+}
diff --git a/eventbus/kafka/sensor/trigger_handler.go b/eventbus/kafka/sensor/trigger_handler.go
new file mode 100644
index 0000000000..41e4662882
--- /dev/null
+++ b/eventbus/kafka/sensor/trigger_handler.go
@@ -0,0 +1,155 @@
+package kafka
+
+import (
+ "time"
+
+ "github.com/Knetic/govaluate"
+ "github.com/argoproj/argo-events/eventbus/common"
+ "github.com/argoproj/argo-events/eventbus/kafka/base"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+ "go.uber.org/zap"
+)
+
+// KafkaTriggerHandler is the per-trigger interface used by the kafka
+// sensor to route events through the event, trigger, and action
+// topics.
+type KafkaTriggerHandler interface {
+	common.TriggerConnection
+	Name() string
+	Ready() bool
+	OneAndDone() bool
+	DependsOn(*cloudevents.Event) (string, bool)
+	Transform(string, *cloudevents.Event) (*cloudevents.Event, error)
+	Filter(string, *cloudevents.Event) bool
+	Update(event *cloudevents.Event, partition int32, offset int64, timestamp time.Time) ([]*cloudevents.Event, error)
+	Offset(int32, int64) int64
+	Action([]*cloudevents.Event) func()
+}
+
+// Name returns the trigger name.
+func (c *KafkaTriggerConnection) Name() string {
+	return c.triggerName
+}
+
+// Ready reports whether the Subscribe callbacks have been installed.
+func (c *KafkaTriggerConnection) Ready() bool {
+	// cannot process events until the subscribe function has been
+	// called, which is when these functions are set
+	return c.transform != nil && c.filter != nil && c.action != nil
+}
+
+// DependsOn returns the dependency name matching the event's source
+// and subject, and whether this trigger depends on it at all.
+func (c *KafkaTriggerConnection) DependsOn(event *cloudevents.Event) (string, bool) {
+	if dep, ok := c.dependencies[base.EventKey(event.Source(), event.Subject())]; ok {
+		return dep.Name, true
+	}
+
+	return "", false
+}
+
+// OneAndDone reports whether a single event can satisfy the trigger,
+// i.e. the dependency expression contains no logical AND.
+func (c *KafkaTriggerConnection) OneAndDone() bool {
+	for _, token := range c.depExpression.Tokens() {
+		if token.Kind == govaluate.LOGICALOP && token.Value == "&&" {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Transform applies the subscriber-provided transform for the given
+// dependency.
+func (c *KafkaTriggerConnection) Transform(depName string, event *cloudevents.Event) (*cloudevents.Event, error) {
+	return c.transform(depName, *event)
+}
+
+// Filter applies the subscriber-provided filter for the given
+// dependency.
+func (c *KafkaTriggerConnection) Filter(depName string, event *cloudevents.Event) bool {
+	return c.filter(depName, *event)
+}
+
+// Update records a newly consumed event against the trigger's state,
+// replacing any previous event with the same source/subject and
+// discarding events older than the last condition reset. If the
+// dependency expression is now satisfied it returns the accumulated
+// events and resets the state; otherwise it returns nil.
+func (c *KafkaTriggerConnection) Update(event *cloudevents.Event, partition int32, offset int64, timestamp time.Time) ([]*cloudevents.Event, error) {
+	eventWithMetadata := &eventWithMetadata{
+		Event:     event,
+		partition: partition,
+		offset:    offset,
+		timestamp: timestamp,
+	}
+
+	// remove previous events with same source and subject and remove
+	// all events older than last condition reset time
+	i := 0
+	for _, event := range c.events {
+		if !event.Same(eventWithMetadata) && event.After(c.lastResetTime) {
+			c.events[i] = event
+			i++
+		}
+	}
+	for j := i; j < len(c.events); j++ {
+		c.events[j] = nil // avoid memory leak
+	}
+	c.events = append(c.events[:i], eventWithMetadata)
+
+	satisfied, err := c.satisfied()
+	if err != nil {
+		return nil, err
+	}
+
+	// if satisfied, publish a message to the action topic containing
+	// all events and reset the trigger
+	// (satisfied is an interface{} from govaluate, hence == true)
+	var events []*cloudevents.Event
+	if satisfied == true {
+		defer c.reset()
+		for _, event := range c.events {
+			events = append(events, event.Event)
+		}
+	}
+
+	return events, nil
+}
+
+// Offset returns the smallest offset on the given partition among the
+// trigger's pending events (capped by the supplied offset), so that
+// uncommitted events are re-consumed after a restart.
+func (c *KafkaTriggerConnection) Offset(partition int32, offset int64) int64 {
+	for _, event := range c.events {
+		if partition == event.partition && offset > event.offset {
+			offset = event.offset
+		}
+	}
+
+	return offset
+}
+
+// Action maps the events back to dependency names and invokes the
+// subscriber's action. With at-least-once semantics the action runs
+// now (before the transaction commits); otherwise a closure is
+// returned to be run after the commit (at-most-once).
+func (c *KafkaTriggerConnection) Action(events []*cloudevents.Event) func() {
+	eventMap := map[string]cloudevents.Event{}
+	for _, event := range events {
+		if depName, ok := c.DependsOn(event); ok {
+			eventMap[depName] = *event
+		}
+	}
+
+	// If at least once is specified, we must call the action
+	// function before committing a transaction, otherwise the
+	// function must be called after. To call after we return a
+	// function.
+	var f func()
+	if c.atLeastOnce {
+		c.action(eventMap)
+	} else {
+		f = func() { c.action(eventMap) }
+	}
+
+	return f
+}
+
+// satisfied evaluates the dependency expression with a parameter set
+// in which a dependency is true iff a matching event is pending.
+func (c *KafkaTriggerConnection) satisfied() (interface{}, error) {
+	parameters := Parameters{}
+	for _, event := range c.events {
+		if depName, ok := c.DependsOn(event.Event); ok {
+			parameters[depName] = true
+		}
+	}
+
+	c.Logger.Infow("Evaluating", zap.String("expr", c.depExpression.String()), zap.Any("parameters", parameters))
+
+	return c.depExpression.Eval(parameters)
+}
+
+// reset clears the trigger's pending events after it fires.
+func (c *KafkaTriggerConnection) reset() {
+	c.events = nil
+}
+
+// Parameters implements govaluate.Parameters; missing dependencies
+// evaluate to false rather than erroring.
+type Parameters map[string]bool
+
+// Get returns the parameter value, defaulting to false when unset.
+func (p Parameters) Get(name string) (interface{}, error) {
+	return p[name], nil
+}
diff --git a/eventbus/stan/sensor/sensor_stan.go b/eventbus/stan/sensor/sensor_stan.go
index 31dd5c34ab..cc29b29910 100644
--- a/eventbus/stan/sensor/sensor_stan.go
+++ b/eventbus/stan/sensor/sensor_stan.go
@@ -1,6 +1,7 @@
package sensor
import (
+ "context"
"crypto/rand"
"fmt"
"math/big"
@@ -27,7 +28,7 @@ func (n *SensorSTAN) Initialize() error {
return nil
}
-func (n *SensorSTAN) Connect(triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency) (eventbuscommon.TriggerConnection, error) {
+func (n *SensorSTAN) Connect(ctx context.Context, triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) {
// Generate clientID with hash code
hashKey := fmt.Sprintf("%s-%s-%s", n.sensorName, triggerName, dependencyExpression)
randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(100)))
diff --git a/eventsources/sources/kafka/start.go b/eventsources/sources/kafka/start.go
index 429b43cc20..3f63f51fe3 100644
--- a/eventsources/sources/kafka/start.go
+++ b/eventsources/sources/kafka/start.go
@@ -218,6 +218,7 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared
log.Info("dispatching event on the data channel...")
eventData := &events.KafkaEventData{
Topic: msg.Topic,
+ Key: string(msg.Key),
Partition: int(msg.Partition),
Timestamp: msg.Timestamp.String(),
Metadata: kafkaEventSource.Metadata,
@@ -384,6 +385,7 @@ func (consumer *Consumer) processOne(session sarama.ConsumerGroupSession, messag
consumer.logger.Info("dispatching event on the data channel...")
eventData := &events.KafkaEventData{
Topic: message.Topic,
+ Key: string(message.Key),
Partition: int(message.Partition),
Timestamp: message.Timestamp.String(),
Metadata: consumer.kafkaEventSource.Metadata,
diff --git a/mkdocs.yml b/mkdocs.yml
index 484971189a..e98328d806 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -55,6 +55,7 @@ nav:
- "eventbus/eventbus.md"
- "eventbus/stan.md"
- "eventbus/jetstream.md"
+ - "eventbus/kafka.md"
- "eventbus/antiaffinity.md"
- EventSources:
- Setup:
diff --git a/pkg/apis/common/common.go b/pkg/apis/common/common.go
index 7a1ac61c82..db9cb4b52a 100644
--- a/pkg/apis/common/common.go
+++ b/pkg/apis/common/common.go
@@ -107,6 +107,7 @@ type EventBusType string
var (
EventBusNATS EventBusType = "nats"
EventBusJetStream EventBusType = "jetstream"
+ EventBusKafka EventBusType = "kafka"
)
// BasicAuth contains the reference to K8s secrets that holds the username and password
diff --git a/pkg/apis/eventbus/v1alpha1/eventbus_types.go b/pkg/apis/eventbus/v1alpha1/eventbus_types.go
index a366aebbdc..deae6e8871 100644
--- a/pkg/apis/eventbus/v1alpha1/eventbus_types.go
+++ b/pkg/apis/eventbus/v1alpha1/eventbus_types.go
@@ -36,6 +36,9 @@ type EventBusSpec struct {
NATS *NATSBus `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"`
// +optional
JetStream *JetStreamBus `json:"jetstream,omitempty" protobuf:"bytes,2,opt,name=jetstream"`
+ // +optional
+ // Kafka eventbus
+ Kafka *KafkaBus `json:"kafka,omitempty" protobuf:"bytes,3,opt,name=kafka"`
}
// EventBusStatus holds the status of the eventbus resource
@@ -51,6 +54,8 @@ type BusConfig struct {
NATS *NATSConfig `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"`
// +optional
JetStream *JetStreamConfig `json:"jetstream,omitempty" protobuf:"bytes,2,opt,name=jetstream"`
+ // +optional
+ Kafka *KafkaBus `json:"kafka,omitempty" protobuf:"bytes,3,opt,name=kafka"`
}
const (
diff --git a/pkg/apis/eventbus/v1alpha1/generated.pb.go b/pkg/apis/eventbus/v1alpha1/generated.pb.go
index 3025c8d848..083e447e8c 100644
--- a/pkg/apis/eventbus/v1alpha1/generated.pb.go
+++ b/pkg/apis/eventbus/v1alpha1/generated.pb.go
@@ -272,10 +272,66 @@ func (m *JetStreamConfig) XXX_DiscardUnknown() {
var xxx_messageInfo_JetStreamConfig proto.InternalMessageInfo
+func (m *KafkaBus) Reset() { *m = KafkaBus{} }
+func (*KafkaBus) ProtoMessage() {}
+func (*KafkaBus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_871e47633eb7aad4, []int{8}
+}
+func (m *KafkaBus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KafkaBus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KafkaBus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KafkaBus.Merge(m, src)
+}
+func (m *KafkaBus) XXX_Size() int {
+ return m.Size()
+}
+func (m *KafkaBus) XXX_DiscardUnknown() {
+ xxx_messageInfo_KafkaBus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KafkaBus proto.InternalMessageInfo
+
+func (m *KafkaConsumerGroup) Reset() { *m = KafkaConsumerGroup{} }
+func (*KafkaConsumerGroup) ProtoMessage() {}
+func (*KafkaConsumerGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_871e47633eb7aad4, []int{9}
+}
+func (m *KafkaConsumerGroup) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KafkaConsumerGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *KafkaConsumerGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KafkaConsumerGroup.Merge(m, src)
+}
+func (m *KafkaConsumerGroup) XXX_Size() int {
+ return m.Size()
+}
+func (m *KafkaConsumerGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_KafkaConsumerGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KafkaConsumerGroup proto.InternalMessageInfo
+
func (m *NATSBus) Reset() { *m = NATSBus{} }
func (*NATSBus) ProtoMessage() {}
func (*NATSBus) Descriptor() ([]byte, []int) {
- return fileDescriptor_871e47633eb7aad4, []int{8}
+ return fileDescriptor_871e47633eb7aad4, []int{10}
}
func (m *NATSBus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -303,7 +359,7 @@ var xxx_messageInfo_NATSBus proto.InternalMessageInfo
func (m *NATSConfig) Reset() { *m = NATSConfig{} }
func (*NATSConfig) ProtoMessage() {}
func (*NATSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_871e47633eb7aad4, []int{9}
+ return fileDescriptor_871e47633eb7aad4, []int{11}
}
func (m *NATSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -331,7 +387,7 @@ var xxx_messageInfo_NATSConfig proto.InternalMessageInfo
func (m *NativeStrategy) Reset() { *m = NativeStrategy{} }
func (*NativeStrategy) ProtoMessage() {}
func (*NativeStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_871e47633eb7aad4, []int{10}
+ return fileDescriptor_871e47633eb7aad4, []int{12}
}
func (m *NativeStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -359,7 +415,7 @@ var xxx_messageInfo_NativeStrategy proto.InternalMessageInfo
func (m *PersistenceStrategy) Reset() { *m = PersistenceStrategy{} }
func (*PersistenceStrategy) ProtoMessage() {}
func (*PersistenceStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_871e47633eb7aad4, []int{11}
+ return fileDescriptor_871e47633eb7aad4, []int{13}
}
func (m *PersistenceStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -394,6 +450,8 @@ func init() {
proto.RegisterType((*JetStreamBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamBus")
proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamBus.NodeSelectorEntry")
proto.RegisterType((*JetStreamConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamConfig")
+ proto.RegisterType((*KafkaBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.KafkaBus")
+ proto.RegisterType((*KafkaConsumerGroup)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.KafkaConsumerGroup")
proto.RegisterType((*NATSBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NATSBus")
proto.RegisterType((*NATSConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NATSConfig")
proto.RegisterType((*NativeStrategy)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NativeStrategy")
@@ -406,120 +464,133 @@ func init() {
}
var fileDescriptor_871e47633eb7aad4 = []byte{
- // 1802 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6e, 0x1b, 0xc7,
- 0x19, 0xd6, 0x4a, 0x94, 0x44, 0x0e, 0x69, 0x1d, 0x46, 0x4a, 0xb3, 0x16, 0x62, 0xd2, 0x60, 0x91,
- 0x42, 0x45, 0xe2, 0x65, 0x1d, 0x04, 0xad, 0x9b, 0x1b, 0x57, 0xab, 0x28, 0xb5, 0x1c, 0xd1, 0x51,
- 0x87, 0x8e, 0x81, 0xa6, 0x41, 0xdd, 0xd1, 0x6a, 0x44, 0xad, 0xcc, 0xdd, 0x61, 0x67, 0x66, 0x09,
- 0xb1, 0x57, 0x45, 0xfb, 0x02, 0x41, 0x51, 0x14, 0x7d, 0x83, 0x02, 0x7d, 0x80, 0xbe, 0x42, 0x7d,
- 0xd1, 0x8b, 0xdc, 0x35, 0x57, 0x44, 0xcc, 0xa0, 0x40, 0x81, 0xbe, 0x81, 0xaf, 0x8a, 0x99, 0x9d,
- 0x3d, 0x70, 0x77, 0x99, 0xc8, 0x26, 0xdd, 0xa0, 0x77, 0x9c, 0xff, 0xf0, 0xfd, 0x87, 0xfd, 0xf7,
- 0x9f, 0x6f, 0x09, 0xee, 0x77, 0x5d, 0x71, 0x1e, 0x9c, 0x58, 0x0e, 0xf5, 0x5a, 0x98, 0x75, 0x69,
- 0x9f, 0xd1, 0x0b, 0xf5, 0xe3, 0x16, 0x19, 0x10, 0x5f, 0xf0, 0x56, 0xff, 0x49, 0xb7, 0x85, 0xfb,
- 0x2e, 0x6f, 0xa9, 0xf3, 0x49, 0xc0, 0x5b, 0x83, 0xdb, 0xb8, 0xd7, 0x3f, 0xc7, 0xb7, 0x5b, 0x5d,
- 0xe2, 0x13, 0x86, 0x05, 0x39, 0xb5, 0xfa, 0x8c, 0x0a, 0x0a, 0xdf, 0x4b, 0xb0, 0xac, 0x08, 0x4b,
- 0xfd, 0x78, 0x1c, 0x62, 0x59, 0xfd, 0x27, 0x5d, 0x4b, 0x62, 0x59, 0x11, 0x96, 0x15, 0x61, 0xed,
- 0xdc, 0xbd, 0x72, 0x1e, 0x0e, 0xf5, 0x3c, 0xea, 0x67, 0x83, 0xef, 0xdc, 0x4a, 0x01, 0x74, 0x69,
- 0x97, 0xb6, 0x94, 0xf8, 0x24, 0x38, 0x53, 0x27, 0x75, 0x50, 0xbf, 0xb4, 0x79, 0xf3, 0xc9, 0x1d,
- 0x6e, 0xb9, 0x54, 0x42, 0xb6, 0x1c, 0xca, 0x48, 0x6b, 0x90, 0xab, 0x67, 0xe7, 0xdd, 0xc4, 0xc6,
- 0xc3, 0xce, 0xb9, 0xeb, 0x13, 0x36, 0x8c, 0xf2, 0x68, 0x31, 0xc2, 0x69, 0xc0, 0x1c, 0xf2, 0x42,
- 0x5e, 0xbc, 0xe5, 0x11, 0x81, 0x8b, 0x62, 0xb5, 0xa6, 0x79, 0xb1, 0xc0, 0x17, 0xae, 0x97, 0x0f,
- 0xf3, 0xc3, 0x6f, 0x72, 0xe0, 0xce, 0x39, 0xf1, 0x70, 0xd6, 0xaf, 0xf9, 0x1f, 0x03, 0x54, 0xec,
- 0x80, 0xef, 0x53, 0xff, 0xcc, 0xed, 0xc2, 0x53, 0x50, 0xf2, 0xb1, 0xe0, 0xa6, 0x71, 0xd3, 0xd8,
- 0xad, 0xbe, 0xf3, 0x81, 0xf5, 0xf2, 0x4f, 0xd0, 0x7a, 0xb0, 0xf7, 0xb0, 0x13, 0xa2, 0xda, 0xe5,
- 0xf1, 0xa8, 0x51, 0x92, 0x67, 0xa4, 0xd0, 0xe1, 0x25, 0xa8, 0x5c, 0x10, 0xc1, 0x05, 0x23, 0xd8,
- 0x33, 0x17, 0x55, 0xa8, 0x0f, 0x67, 0x09, 0x75, 0x9f, 0x88, 0x8e, 0x02, 0xd3, 0xf1, 0xae, 0x8d,
- 0x47, 0x8d, 0x4a, 0x2c, 0x44, 0x49, 0xb0, 0xe6, 0xdf, 0x16, 0xc1, 0xe6, 0x3e, 0xf5, 0x05, 0x96,
- 0xfd, 0x79, 0x48, 0xbc, 0x7e, 0x0f, 0x0b, 0x02, 0x7f, 0x0e, 0x2a, 0xd1, 0xe3, 0x8b, 0x4a, 0xdf,
- 0xb5, 0xc2, 0x7e, 0xca, 0x90, 0x96, 0x1c, 0x08, 0x6b, 0x70, 0xdb, 0x42, 0xda, 0x08, 0x91, 0x5f,
- 0x07, 0x2e, 0x23, 0x9e, 0xcc, 0xcb, 0xde, 0x7c, 0x3a, 0x6a, 0x2c, 0xc8, 0x80, 0x91, 0x96, 0xa3,
- 0x04, 0x0d, 0x9e, 0x80, 0x75, 0xd7, 0xc3, 0x5d, 0x72, 0x1c, 0xf4, 0x7a, 0xc7, 0xb4, 0xe7, 0x3a,
- 0x43, 0x55, 0x70, 0xc5, 0xbe, 0xa3, 0xdd, 0xd6, 0x0f, 0x27, 0xd5, 0xcf, 0x47, 0x8d, 0x1b, 0xf9,
- 0x59, 0xb4, 0x12, 0x03, 0x94, 0x05, 0x94, 0x31, 0x38, 0x71, 0x02, 0xe6, 0x8a, 0xa1, 0xac, 0x8d,
- 0x5c, 0x0a, 0x73, 0x49, 0x15, 0xf1, 0xdd, 0xa2, 0x22, 0x3a, 0x93, 0xa6, 0xf6, 0x96, 0x4c, 0x22,
- 0x23, 0x44, 0x59, 0xc0, 0xe6, 0x3f, 0x16, 0x41, 0xf9, 0x40, 0x36, 0xde, 0x0e, 0x38, 0xfc, 0x15,
- 0x28, 0xcb, 0xb9, 0x3d, 0xc5, 0x02, 0xeb, 0x76, 0xfd, 0x20, 0x15, 0x29, 0x1e, 0xbf, 0xe4, 0x91,
- 0x49, 0x6b, 0x19, 0xfb, 0xa3, 0x93, 0x0b, 0xe2, 0x88, 0x36, 0x11, 0xd8, 0x86, 0xba, 0x7e, 0x90,
- 0xc8, 0x50, 0x8c, 0x0a, 0x2f, 0x40, 0x89, 0xf7, 0x89, 0xa3, 0x87, 0xe3, 0xde, 0x2c, 0xc3, 0x11,
- 0x65, 0xdd, 0xe9, 0x13, 0xc7, 0xae, 0xe9, 0xa8, 0x25, 0x79, 0x42, 0x2a, 0x06, 0x64, 0x60, 0x85,
- 0x0b, 0x2c, 0x02, 0xae, 0xbb, 0x76, 0x7f, 0x2e, 0xd1, 0x14, 0xa2, 0xbd, 0xa6, 0xe3, 0xad, 0x84,
- 0x67, 0xa4, 0x23, 0x35, 0xff, 0x69, 0x80, 0x5a, 0x64, 0x7a, 0xe4, 0x72, 0x01, 0x3f, 0xcd, 0xb5,
- 0xd4, 0xba, 0x5a, 0x4b, 0xa5, 0xb7, 0x6a, 0xe8, 0x86, 0x0e, 0x55, 0x8e, 0x24, 0xa9, 0x76, 0xba,
- 0x60, 0xd9, 0x15, 0xc4, 0xe3, 0xe6, 0xe2, 0xcd, 0xa5, 0xdd, 0xea, 0x3b, 0xef, 0xcf, 0xa3, 0x42,
- 0xfb, 0x9a, 0x0e, 0xb8, 0x7c, 0x28, 0xa1, 0x51, 0x18, 0xa1, 0xf9, 0xef, 0x54, 0x65, 0xb2, 0xc9,
- 0x10, 0x4f, 0xac, 0x94, 0xfd, 0x59, 0x57, 0x8a, 0x8c, 0x9c, 0xdd, 0x27, 0x41, 0x7e, 0x9f, 0xdc,
- 0x9b, 0xcb, 0x3e, 0x51, 0x65, 0x4e, 0x5d, 0x26, 0x5f, 0x1a, 0x60, 0x6d, 0xf2, 0x79, 0xc3, 0xc7,
- 0xf1, 0x2c, 0x85, 0xe5, 0xfe, 0xe8, 0xea, 0x69, 0x84, 0xf7, 0x98, 0xf5, 0xf5, 0x83, 0x03, 0x3d,
- 0xb0, 0xe2, 0xa8, 0x25, 0xa7, 0xeb, 0x3c, 0x98, 0xa5, 0xce, 0x78, 0xef, 0x27, 0xe1, 0xc2, 0x33,
- 0xd2, 0x41, 0x9a, 0xbf, 0x5f, 0x03, 0xb5, 0x74, 0x37, 0xe0, 0xf7, 0xc1, 0xea, 0x80, 0x30, 0xee,
- 0x52, 0x5f, 0x55, 0x58, 0xb1, 0xd7, 0xb5, 0xe7, 0xea, 0xa3, 0x50, 0x8c, 0x22, 0x3d, 0xdc, 0x05,
- 0x65, 0x46, 0xfa, 0x3d, 0xd7, 0xc1, 0x5c, 0x25, 0xbb, 0x6c, 0xd7, 0xe4, 0x78, 0x22, 0x2d, 0x43,
- 0xb1, 0x16, 0xfe, 0xc1, 0x00, 0x9b, 0x4e, 0x76, 0x2b, 0xeb, 0xb7, 0xb1, 0x3d, 0x4b, 0x81, 0xb9,
- 0x55, 0x6f, 0xbf, 0x36, 0x1e, 0x35, 0xf2, 0x37, 0x00, 0xca, 0x87, 0x87, 0x7f, 0x35, 0xc0, 0x75,
- 0x46, 0x7a, 0x14, 0x9f, 0x12, 0x96, 0x73, 0x30, 0x4b, 0xaf, 0x22, 0xb9, 0x1b, 0xe3, 0x51, 0xe3,
- 0x3a, 0x9a, 0x16, 0x13, 0x4d, 0x4f, 0x07, 0xfe, 0xc5, 0x00, 0xa6, 0x47, 0x04, 0x73, 0x1d, 0x9e,
- 0xcf, 0x75, 0xf9, 0x55, 0xe4, 0xfa, 0xc6, 0x78, 0xd4, 0x30, 0xdb, 0x53, 0x42, 0xa2, 0xa9, 0xc9,
- 0xc0, 0xdf, 0x19, 0xa0, 0xda, 0x97, 0x13, 0xc2, 0x05, 0xf1, 0x1d, 0x62, 0xae, 0xa8, 0xe4, 0x3e,
- 0x9a, 0x25, 0xb9, 0xe3, 0x04, 0xae, 0x23, 0x24, 0xb7, 0xe9, 0x0e, 0xed, 0xf5, 0xf1, 0xa8, 0x51,
- 0x4d, 0x29, 0x50, 0x3a, 0x28, 0x74, 0x52, 0xdb, 0x76, 0x55, 0x25, 0xf0, 0xe3, 0x17, 0x7e, 0x51,
- 0xdb, 0x1a, 0x20, 0x9c, 0xea, 0xe8, 0x94, 0x5a, 0xba, 0x7f, 0x34, 0x40, 0xcd, 0xa7, 0xa7, 0xa4,
- 0x43, 0x7a, 0xc4, 0x11, 0x94, 0x99, 0x65, 0xb5, 0x7c, 0x3f, 0x99, 0xd7, 0x66, 0xb2, 0x1e, 0xa4,
- 0xc0, 0x0f, 0x7c, 0xc1, 0x86, 0xf6, 0xb6, 0x7e, 0x19, 0x6b, 0x69, 0x15, 0x9a, 0xc8, 0x02, 0x7e,
- 0x0c, 0xaa, 0x82, 0xf6, 0x24, 0x07, 0x74, 0xa9, 0xcf, 0xcd, 0x8a, 0x4a, 0xaa, 0x5e, 0xc4, 0x14,
- 0x1e, 0xc6, 0x66, 0xf6, 0x96, 0x06, 0xae, 0x26, 0x32, 0x8e, 0xd2, 0x38, 0x90, 0xe4, 0x49, 0x08,
- 0x50, 0x9d, 0xfd, 0x5e, 0x11, 0xf4, 0x31, 0x3d, 0x7d, 0x29, 0x1e, 0x02, 0x7d, 0xb0, 0x11, 0xd3,
- 0x9f, 0x0e, 0x71, 0x18, 0x11, 0xdc, 0xac, 0xaa, 0x12, 0x0a, 0x19, 0xdb, 0x11, 0x75, 0x70, 0x2f,
- 0x64, 0x18, 0x88, 0x9c, 0x11, 0x26, 0x9f, 0xbe, 0x6d, 0xea, 0x62, 0x36, 0x0e, 0x33, 0x48, 0x28,
- 0x87, 0x0d, 0x7f, 0x0a, 0x36, 0xfb, 0xcc, 0xa5, 0x2a, 0x85, 0x1e, 0xe6, 0xfc, 0x01, 0xf6, 0x88,
- 0x59, 0x53, 0x9b, 0xef, 0xba, 0x86, 0xd9, 0x3c, 0xce, 0x1a, 0xa0, 0xbc, 0x8f, 0xdc, 0x86, 0x91,
- 0xd0, 0xbc, 0x96, 0x6c, 0xc3, 0xc8, 0x17, 0xc5, 0x5a, 0xf8, 0x01, 0x28, 0xe3, 0xb3, 0x33, 0xd7,
- 0x97, 0x96, 0x6b, 0xaa, 0x85, 0x6f, 0x14, 0x95, 0xb6, 0xa7, 0x6d, 0x42, 0x9c, 0xe8, 0x84, 0x62,
- 0x5f, 0x78, 0x1f, 0x40, 0x4e, 0xd8, 0xc0, 0x75, 0xc8, 0x9e, 0xe3, 0xd0, 0xc0, 0x17, 0x2a, 0xf7,
- 0x75, 0x95, 0xfb, 0x8e, 0xce, 0x1d, 0x76, 0x72, 0x16, 0xa8, 0xc0, 0x4b, 0x66, 0xcf, 0x89, 0x10,
- 0xae, 0xdf, 0xe5, 0xe6, 0x86, 0x42, 0x50, 0x51, 0x3b, 0x5a, 0x86, 0x62, 0x2d, 0x7c, 0x0b, 0x54,
- 0xb8, 0xc0, 0x4c, 0xec, 0xb1, 0x2e, 0x37, 0x37, 0x6f, 0x2e, 0xed, 0x56, 0xc2, 0x1b, 0xb4, 0x13,
- 0x09, 0x51, 0xa2, 0x87, 0xef, 0x82, 0x1a, 0x4f, 0x11, 0x77, 0x13, 0x2a, 0xe8, 0x0d, 0x39, 0xc1,
- 0x69, 0x42, 0x8f, 0x26, 0xac, 0xa0, 0x05, 0x80, 0x87, 0x2f, 0x8f, 0xf1, 0x50, 0x6e, 0x43, 0x73,
- 0x4b, 0xf9, 0xac, 0x49, 0x2a, 0xd9, 0x8e, 0xa5, 0x28, 0x65, 0xb1, 0x73, 0x17, 0x6c, 0xe6, 0x5e,
- 0x15, 0xb8, 0x01, 0x96, 0x9e, 0x90, 0x61, 0x78, 0x89, 0x21, 0xf9, 0x13, 0x6e, 0x83, 0xe5, 0x01,
- 0xee, 0x05, 0x24, 0x24, 0xe8, 0x28, 0x3c, 0xbc, 0xb7, 0x78, 0xc7, 0x68, 0xfe, 0xdd, 0x00, 0xeb,
- 0x99, 0x6f, 0x0c, 0x78, 0x03, 0x2c, 0x05, 0xac, 0xa7, 0x2f, 0xc1, 0xaa, 0x6e, 0xe7, 0xd2, 0xc7,
- 0xe8, 0x08, 0x49, 0x39, 0xfc, 0x05, 0xa8, 0x61, 0xc7, 0x21, 0x9c, 0x87, 0x83, 0xa4, 0x6f, 0xeb,
- 0x37, 0xa7, 0x10, 0x72, 0x46, 0xc4, 0x87, 0x64, 0x18, 0x25, 0x18, 0x36, 0x60, 0x2f, 0xe5, 0x8e,
- 0x26, 0xc0, 0xe0, 0x9d, 0x4c, 0xdb, 0x96, 0x54, 0x12, 0xf1, 0xcb, 0x3f, 0xbd, 0x75, 0xcd, 0x7f,
- 0x19, 0x60, 0x55, 0xb3, 0x28, 0xe8, 0x83, 0x15, 0x1f, 0x0b, 0x77, 0x40, 0x34, 0x57, 0x99, 0x89,
- 0xf7, 0x3e, 0x50, 0x48, 0xf1, 0xfa, 0x05, 0x92, 0x4b, 0x84, 0x32, 0xa4, 0xa3, 0xc0, 0x0b, 0xb0,
- 0x42, 0x2e, 0xa9, 0x70, 0x23, 0x56, 0x3f, 0xaf, 0xaf, 0x4b, 0x15, 0xeb, 0x40, 0x21, 0x23, 0x1d,
- 0xa1, 0xf9, 0x95, 0x01, 0x40, 0x62, 0xf2, 0x4d, 0x0f, 0xeb, 0x2d, 0x50, 0x71, 0x7a, 0x01, 0x17,
- 0x84, 0x1d, 0xbe, 0xaf, 0x3f, 0xcf, 0xd4, 0xcc, 0xee, 0x47, 0x42, 0x94, 0xe8, 0xe1, 0xdb, 0xa0,
- 0x84, 0x03, 0x71, 0xae, 0x9b, 0x6e, 0x4a, 0x2a, 0xba, 0x17, 0x88, 0xf3, 0xe7, 0xf2, 0x91, 0x05,
- 0xe2, 0x3c, 0x6a, 0x01, 0x52, 0x56, 0xb9, 0x39, 0x28, 0xcd, 0x71, 0x0e, 0x9a, 0x9f, 0xad, 0x83,
- 0xb5, 0xc9, 0xc6, 0xc3, 0xb7, 0x53, 0xa4, 0xcb, 0x50, 0x6b, 0x26, 0xfe, 0x2e, 0x28, 0x20, 0x5e,
- 0x51, 0x2d, 0x8b, 0x57, 0xaa, 0x25, 0x7b, 0x75, 0x2f, 0x7d, 0x1b, 0x57, 0x77, 0x31, 0x57, 0x2c,
- 0x7d, 0xbb, 0x5c, 0xf1, 0xff, 0x87, 0x7e, 0xfd, 0x29, 0x4b, 0x4a, 0x56, 0xd4, 0xe5, 0xf9, 0xe9,
- 0xfc, 0xde, 0xfd, 0xf9, 0xd0, 0x92, 0xd5, 0x39, 0xd1, 0x92, 0x34, 0xd3, 0x2b, 0xbf, 0x2a, 0xa6,
- 0x57, 0xc0, 0x7d, 0x2a, 0xaf, 0x80, 0xfb, 0x34, 0xc1, 0x8a, 0x87, 0x2f, 0xf7, 0xba, 0x44, 0x31,
- 0xab, 0x4a, 0xb8, 0xf8, 0xda, 0x4a, 0x82, 0xb4, 0xe6, 0x7f, 0xce, 0x8f, 0x8a, 0x49, 0x46, 0xed,
- 0xa5, 0x48, 0x46, 0x21, 0xd7, 0xba, 0x36, 0x23, 0xd7, 0x5a, 0xbb, 0x32, 0xd7, 0x5a, 0x9f, 0x81,
- 0x6b, 0xbd, 0x09, 0x56, 0x3d, 0x7c, 0xd9, 0xe6, 0x9a, 0x1e, 0x95, 0xec, 0xaa, 0xfc, 0x24, 0x6e,
- 0x87, 0x22, 0x14, 0xe9, 0x64, 0x62, 0x1e, 0xbe, 0xb4, 0x87, 0x82, 0x48, 0x6e, 0x14, 0xd3, 0xa8,
- 0xb6, 0x96, 0xa1, 0x58, 0xab, 0x01, 0x3b, 0xc1, 0x09, 0x57, 0xa4, 0x28, 0x01, 0x94, 0x22, 0x14,
- 0xe9, 0x5e, 0x94, 0x0a, 0xc1, 0x23, 0xb0, 0xcd, 0xf0, 0x99, 0xb8, 0x47, 0x30, 0x13, 0x27, 0x04,
- 0x8b, 0x87, 0xae, 0x47, 0x68, 0x20, 0xcc, 0xed, 0xf8, 0x02, 0xd8, 0x46, 0x05, 0x7a, 0x54, 0xe8,
- 0x05, 0x0f, 0xc1, 0x96, 0x94, 0x1f, 0xc8, 0x57, 0xd8, 0xa5, 0x7e, 0x04, 0xf6, 0x9a, 0x02, 0x7b,
- 0x7d, 0x3c, 0x6a, 0x6c, 0xa1, 0xbc, 0x1a, 0x15, 0xf9, 0xc0, 0x9f, 0x80, 0x0d, 0x29, 0x3e, 0x22,
- 0x98, 0x93, 0x08, 0xe7, 0x3b, 0x21, 0xad, 0x91, 0x93, 0x88, 0x32, 0x3a, 0x94, 0xb3, 0x86, 0xfb,
- 0x60, 0x53, 0xca, 0xf6, 0xa9, 0xe7, 0xb9, 0x71, 0x5d, 0xaf, 0x2b, 0x08, 0xb5, 0xc8, 0x51, 0x56,
- 0x89, 0xf2, 0xf6, 0xb3, 0x53, 0xc5, 0x3f, 0x2f, 0x82, 0xad, 0x82, 0x4b, 0x4d, 0xd6, 0xc7, 0x05,
- 0x65, 0xb8, 0x4b, 0x92, 0xd1, 0x36, 0x92, 0xfa, 0x3a, 0x19, 0x1d, 0xca, 0x59, 0xc3, 0xc7, 0x00,
- 0x84, 0x97, 0x7f, 0x9b, 0x9e, 0xea, 0xc0, 0xf6, 0x5d, 0xf9, 0xa8, 0xf7, 0x62, 0xe9, 0xf3, 0x51,
- 0xe3, 0x56, 0xd1, 0x7f, 0xc7, 0x51, 0x3e, 0xe2, 0x11, 0xed, 0x05, 0x1e, 0x49, 0x1c, 0x50, 0x0a,
- 0x12, 0xfe, 0x12, 0x80, 0x81, 0xd2, 0x77, 0xdc, 0xdf, 0x44, 0x97, 0xfb, 0xd7, 0xfe, 0x09, 0x69,
- 0x45, 0x7f, 0x73, 0x5b, 0x3f, 0x0b, 0xb0, 0x2f, 0xe4, 0xfb, 0xa1, 0x66, 0xef, 0x51, 0x8c, 0x82,
- 0x52, 0x88, 0xb6, 0xf5, 0xf4, 0x59, 0x7d, 0xe1, 0xf3, 0x67, 0xf5, 0x85, 0x2f, 0x9e, 0xd5, 0x17,
- 0x7e, 0x3b, 0xae, 0x1b, 0x4f, 0xc7, 0x75, 0xe3, 0xf3, 0x71, 0xdd, 0xf8, 0x62, 0x5c, 0x37, 0xbe,
- 0x1c, 0xd7, 0x8d, 0xcf, 0xbe, 0xaa, 0x2f, 0x7c, 0x52, 0x8e, 0xae, 0x95, 0xff, 0x06, 0x00, 0x00,
- 0xff, 0xff, 0xca, 0xde, 0x5c, 0x19, 0x8b, 0x1a, 0x00, 0x00,
+ // 2011 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4f, 0x6f, 0x1b, 0xc7,
+ 0x15, 0xd7, 0x8a, 0x94, 0x44, 0x0e, 0xa9, 0x3f, 0x1c, 0x29, 0xcd, 0x5a, 0x88, 0x49, 0x83, 0x41,
+ 0x0a, 0x17, 0x89, 0x97, 0x75, 0x91, 0xb6, 0xae, 0x7b, 0x70, 0xb9, 0x8a, 0x62, 0xcb, 0x16, 0x65,
+ 0x75, 0x28, 0x1b, 0x48, 0x1a, 0xd4, 0x1d, 0xad, 0x46, 0xd4, 0x4a, 0xfb, 0x87, 0x9d, 0x99, 0x25,
+ 0xc4, 0x9e, 0x8a, 0xf6, 0xd0, 0x6b, 0x50, 0x14, 0x45, 0xbf, 0x41, 0x81, 0xde, 0xdb, 0x6f, 0x50,
+ 0xd4, 0x87, 0x02, 0x0d, 0x7a, 0x69, 0x4e, 0x44, 0xcc, 0xa0, 0x5f, 0xc2, 0xa7, 0x62, 0x66, 0x67,
+ 0xff, 0x90, 0x4b, 0xd9, 0x92, 0x29, 0xd7, 0xc8, 0x6d, 0xe7, 0xbd, 0x37, 0xbf, 0xf7, 0xe6, 0xcd,
+ 0xcc, 0x7b, 0xbf, 0x21, 0xc1, 0xfd, 0x8e, 0xcd, 0x8f, 0x82, 0x7d, 0xc3, 0xf2, 0xdd, 0x06, 0xa6,
+ 0x1d, 0xbf, 0x4b, 0xfd, 0x63, 0xf9, 0x71, 0x83, 0xf4, 0x88, 0xc7, 0x59, 0xa3, 0x7b, 0xd2, 0x69,
+ 0xe0, 0xae, 0xcd, 0x1a, 0x72, 0xbc, 0x1f, 0xb0, 0x46, 0xef, 0x26, 0x76, 0xba, 0x47, 0xf8, 0x66,
+ 0xa3, 0x43, 0x3c, 0x42, 0x31, 0x27, 0x07, 0x46, 0x97, 0xfa, 0xdc, 0x87, 0xb7, 0x13, 0x2c, 0x23,
+ 0xc2, 0x92, 0x1f, 0x4f, 0x42, 0x2c, 0xa3, 0x7b, 0xd2, 0x31, 0x04, 0x96, 0x11, 0x61, 0x19, 0x11,
+ 0xd6, 0xfa, 0x9d, 0x73, 0xc7, 0x61, 0xf9, 0xae, 0xeb, 0x7b, 0xe3, 0xce, 0xd7, 0x6f, 0xa4, 0x00,
+ 0x3a, 0x7e, 0xc7, 0x6f, 0x48, 0xf1, 0x7e, 0x70, 0x28, 0x47, 0x72, 0x20, 0xbf, 0x94, 0x79, 0xfd,
+ 0xe4, 0x16, 0x33, 0x6c, 0x5f, 0x40, 0x36, 0x2c, 0x9f, 0x92, 0x46, 0x2f, 0xb3, 0x9e, 0xf5, 0x0f,
+ 0x13, 0x1b, 0x17, 0x5b, 0x47, 0xb6, 0x47, 0x68, 0x3f, 0x8a, 0xa3, 0x41, 0x09, 0xf3, 0x03, 0x6a,
+ 0x91, 0x0b, 0xcd, 0x62, 0x0d, 0x97, 0x70, 0x3c, 0xc9, 0x57, 0xe3, 0xac, 0x59, 0x34, 0xf0, 0xb8,
+ 0xed, 0x66, 0xdd, 0xfc, 0xe0, 0x65, 0x13, 0x98, 0x75, 0x44, 0x5c, 0x3c, 0x3e, 0xaf, 0xfe, 0xef,
+ 0x59, 0x50, 0x34, 0x03, 0xb6, 0xe1, 0x7b, 0x87, 0x76, 0x07, 0x1e, 0x80, 0xbc, 0x87, 0x39, 0xd3,
+ 0xb5, 0x6b, 0xda, 0xf5, 0xd2, 0xf7, 0x3e, 0x36, 0x5e, 0x7d, 0x07, 0x8d, 0x9d, 0xe6, 0x5e, 0x3b,
+ 0x44, 0x35, 0x0b, 0xc3, 0x41, 0x2d, 0x2f, 0xc6, 0x48, 0xa2, 0xc3, 0x53, 0x50, 0x3c, 0x26, 0x9c,
+ 0x71, 0x4a, 0xb0, 0xab, 0xcf, 0x4a, 0x57, 0x0f, 0xa6, 0x71, 0x75, 0x9f, 0xf0, 0xb6, 0x04, 0x53,
+ 0xfe, 0x16, 0x87, 0x83, 0x5a, 0x31, 0x16, 0xa2, 0xc4, 0x19, 0x24, 0x60, 0xee, 0x04, 0x1f, 0x9e,
+ 0x60, 0x3d, 0x27, 0xbd, 0x7e, 0x34, 0x8d, 0xd7, 0x07, 0x02, 0xc8, 0x0c, 0x98, 0x59, 0x1c, 0x0e,
+ 0x6a, 0x73, 0x72, 0x84, 0x42, 0xf4, 0xfa, 0xdf, 0x66, 0x41, 0x65, 0xc3, 0xf7, 0x38, 0x16, 0xdb,
+ 0xb0, 0x47, 0xdc, 0xae, 0x83, 0x39, 0x81, 0x9f, 0x80, 0x62, 0x74, 0x4a, 0xa2, 0x0c, 0x5f, 0x37,
+ 0xc2, 0x6d, 0x13, 0x3e, 0x0c, 0x71, 0xee, 0x8c, 0xde, 0x4d, 0x03, 0x29, 0x23, 0x44, 0x7e, 0x19,
+ 0xd8, 0x94, 0xb8, 0x22, 0x10, 0xb3, 0xf2, 0x74, 0x50, 0x9b, 0x11, 0xeb, 0x8a, 0xb4, 0x0c, 0x25,
+ 0x68, 0x70, 0x1f, 0x2c, 0xdb, 0x2e, 0xee, 0x90, 0xdd, 0xc0, 0x71, 0x76, 0x7d, 0xc7, 0xb6, 0xfa,
+ 0x32, 0xaf, 0x45, 0xf3, 0x96, 0x9a, 0xb6, 0xbc, 0x35, 0xaa, 0x7e, 0x3e, 0xa8, 0x5d, 0xcd, 0x1e,
+ 0x79, 0x23, 0x31, 0x40, 0xe3, 0x80, 0xc2, 0x07, 0x23, 0x56, 0x40, 0x6d, 0xde, 0x17, 0x6b, 0x23,
+ 0xa7, 0x5c, 0x65, 0xf1, 0xdd, 0x49, 0x8b, 0x68, 0x8f, 0x9a, 0x9a, 0xab, 0x22, 0x88, 0x31, 0x21,
+ 0x1a, 0x07, 0xac, 0xff, 0x73, 0x16, 0x14, 0x36, 0x45, 0xa6, 0xcd, 0x80, 0xc1, 0x5f, 0x80, 0x82,
+ 0xb8, 0x1e, 0x07, 0x98, 0x63, 0x95, 0xae, 0xef, 0xa6, 0x3c, 0xc5, 0xa7, 0x3c, 0xd9, 0x23, 0x61,
+ 0x2d, 0x7c, 0x3f, 0xdc, 0x3f, 0x26, 0x16, 0x6f, 0x11, 0x8e, 0x4d, 0xa8, 0xd6, 0x0f, 0x12, 0x19,
+ 0x8a, 0x51, 0xe1, 0x31, 0xc8, 0xb3, 0x2e, 0xb1, 0xd4, 0x19, 0xbc, 0x37, 0xcd, 0x69, 0x88, 0xa2,
+ 0x6e, 0x77, 0x89, 0x65, 0x96, 0x95, 0xd7, 0xbc, 0x18, 0x21, 0xe9, 0x03, 0x52, 0x30, 0xcf, 0x38,
+ 0xe6, 0x01, 0x53, 0x59, 0xbb, 0x7f, 0x29, 0xde, 0x24, 0xa2, 0xb9, 0xa4, 0xfc, 0xcd, 0x87, 0x63,
+ 0xa4, 0x3c, 0xd5, 0xff, 0xa3, 0x81, 0x72, 0x64, 0xba, 0x6d, 0x33, 0x0e, 0x3f, 0xcb, 0xa4, 0xd4,
+ 0x38, 0x5f, 0x4a, 0xc5, 0x6c, 0x99, 0xd0, 0x15, 0xe5, 0xaa, 0x10, 0x49, 0x52, 0xe9, 0xb4, 0xc1,
+ 0x9c, 0xcd, 0x89, 0xcb, 0xf4, 0xd9, 0x6b, 0xb9, 0x69, 0x6f, 0x57, 0x14, 0xb6, 0xb9, 0xa8, 0x1c,
+ 0xce, 0x6d, 0x09, 0x68, 0x14, 0x7a, 0xa8, 0xff, 0x6b, 0x36, 0x59, 0x99, 0x48, 0x32, 0xc4, 0x23,
+ 0x95, 0x6b, 0x63, 0xda, 0xca, 0x25, 0x3c, 0x8f, 0x97, 0xad, 0x20, 0x5b, 0xb6, 0xee, 0x5d, 0x4a,
+ 0xd9, 0x92, 0xcb, 0x7c, 0xd3, 0x35, 0xeb, 0x2b, 0x0d, 0x2c, 0x8d, 0x1e, 0x2b, 0xf8, 0x24, 0x3e,
+ 0xb2, 0x61, 0x56, 0x7f, 0x78, 0x7e, 0xd7, 0x61, 0x57, 0x36, 0x5e, 0x7c, 0x3e, 0xa1, 0x0b, 0xe6,
+ 0x2d, 0x59, 0xb2, 0x55, 0x3a, 0x37, 0xa7, 0x59, 0x5b, 0xdc, 0xc5, 0x12, 0x77, 0xe1, 0x18, 0x29,
+ 0x27, 0xf5, 0xdf, 0x2e, 0x81, 0x72, 0x3a, 0xe9, 0xf0, 0x3b, 0x60, 0xa1, 0x47, 0x28, 0xb3, 0x7d,
+ 0x4f, 0xae, 0xb0, 0x68, 0x2e, 0xab, 0x99, 0x0b, 0x8f, 0x43, 0x31, 0x8a, 0xf4, 0xf0, 0x3a, 0x28,
+ 0x50, 0xd2, 0x75, 0x6c, 0x0b, 0x33, 0x19, 0xec, 0x9c, 0x59, 0x16, 0xb7, 0x00, 0x29, 0x19, 0x8a,
+ 0xb5, 0xf0, 0xf7, 0x1a, 0xa8, 0x58, 0xe3, 0xc5, 0x5f, 0x6d, 0x5e, 0x6b, 0x9a, 0x05, 0x66, 0x3a,
+ 0x8a, 0xf9, 0xd6, 0x70, 0x50, 0xcb, 0x36, 0x1a, 0x94, 0x75, 0x0f, 0xff, 0xa2, 0x81, 0x2b, 0x94,
+ 0x38, 0x3e, 0x3e, 0x20, 0x34, 0x33, 0x41, 0xcf, 0xbf, 0x8e, 0xe0, 0xae, 0x0e, 0x07, 0xb5, 0x2b,
+ 0xe8, 0x2c, 0x9f, 0xe8, 0xec, 0x70, 0xe0, 0x9f, 0x35, 0xa0, 0xbb, 0x84, 0x53, 0xdb, 0x62, 0xd9,
+ 0x58, 0xe7, 0x5e, 0x47, 0xac, 0xef, 0x0c, 0x07, 0x35, 0xbd, 0x75, 0x86, 0x4b, 0x74, 0x66, 0x30,
+ 0xf0, 0x37, 0x1a, 0x28, 0x75, 0xc5, 0x09, 0x61, 0x9c, 0x78, 0x16, 0xd1, 0xe7, 0x65, 0x70, 0x0f,
+ 0xa7, 0x09, 0x6e, 0x37, 0x81, 0x6b, 0x73, 0xc1, 0xd4, 0x3a, 0x7d, 0x73, 0x79, 0x38, 0xa8, 0x95,
+ 0x52, 0x0a, 0x94, 0x76, 0x0a, 0xad, 0x54, 0x51, 0x5f, 0x90, 0x01, 0xfc, 0xe8, 0xc2, 0x17, 0xb5,
+ 0xa5, 0x00, 0xc2, 0x53, 0x1d, 0x8d, 0x52, 0xb5, 0xfd, 0x0f, 0x1a, 0x28, 0x7b, 0xfe, 0x01, 0x69,
+ 0x13, 0x87, 0x58, 0xdc, 0xa7, 0x7a, 0x41, 0xd6, 0xf8, 0x4f, 0x2f, 0xab, 0x00, 0x1a, 0x3b, 0x29,
+ 0xf0, 0x4d, 0x8f, 0xd3, 0xbe, 0xb9, 0xa6, 0x2e, 0x63, 0x39, 0xad, 0x42, 0x23, 0x51, 0xc0, 0x47,
+ 0xa0, 0xc4, 0x7d, 0x47, 0x30, 0x5a, 0xdb, 0xf7, 0x98, 0x5e, 0x94, 0x41, 0x55, 0x27, 0x11, 0x92,
+ 0xbd, 0xd8, 0xcc, 0x5c, 0x55, 0xc0, 0xa5, 0x44, 0xc6, 0x50, 0x1a, 0x07, 0x92, 0x2c, 0xd7, 0x01,
+ 0x32, 0xb3, 0xdf, 0x9e, 0x04, 0xbd, 0xeb, 0x1f, 0xbc, 0x12, 0xdd, 0x81, 0x1e, 0x58, 0x89, 0x59,
+ 0x56, 0x9b, 0x58, 0x94, 0x70, 0xa6, 0x97, 0xe4, 0x12, 0x26, 0x12, 0xc3, 0x6d, 0xdf, 0xc2, 0x4e,
+ 0x48, 0x64, 0x10, 0x39, 0x24, 0x54, 0xec, 0xbe, 0xa9, 0xab, 0xc5, 0xac, 0x6c, 0x8d, 0x21, 0xa1,
+ 0x0c, 0x36, 0xbc, 0x0b, 0x2a, 0x5d, 0x6a, 0xfb, 0x32, 0x04, 0x07, 0x33, 0xb6, 0x83, 0x5d, 0xa2,
+ 0x97, 0x65, 0xe5, 0xbb, 0xa2, 0x60, 0x2a, 0xbb, 0xe3, 0x06, 0x28, 0x3b, 0x47, 0x54, 0xc3, 0x48,
+ 0xa8, 0x2f, 0x26, 0xd5, 0x30, 0x9a, 0x8b, 0x62, 0x2d, 0xfc, 0x18, 0x14, 0xf0, 0xe1, 0xa1, 0xed,
+ 0x09, 0xcb, 0x25, 0x99, 0xc2, 0x77, 0x26, 0x2d, 0xad, 0xa9, 0x6c, 0x42, 0x9c, 0x68, 0x84, 0xe2,
+ 0xb9, 0xf0, 0x3e, 0x80, 0x8c, 0xd0, 0x9e, 0x6d, 0x91, 0xa6, 0x65, 0xf9, 0x81, 0xc7, 0x65, 0xec,
+ 0xcb, 0x32, 0xf6, 0x75, 0x15, 0x3b, 0x6c, 0x67, 0x2c, 0xd0, 0x84, 0x59, 0x22, 0x7a, 0x46, 0x38,
+ 0xb7, 0xbd, 0x0e, 0xd3, 0x57, 0x24, 0x82, 0xf4, 0xda, 0x56, 0x32, 0x14, 0x6b, 0xe1, 0xfb, 0xa0,
+ 0xc8, 0x38, 0xa6, 0xbc, 0x49, 0x3b, 0x4c, 0xaf, 0x5c, 0xcb, 0x5d, 0x2f, 0x86, 0x8d, 0xba, 0x1d,
+ 0x09, 0x51, 0xa2, 0x87, 0x1f, 0x82, 0x32, 0x4b, 0x3d, 0x43, 0x74, 0x28, 0xa1, 0x57, 0xc4, 0x09,
+ 0x4e, 0x3f, 0x4f, 0xd0, 0x88, 0x15, 0x34, 0x00, 0x70, 0xf1, 0xe9, 0x2e, 0xee, 0x8b, 0x6a, 0xa8,
+ 0xaf, 0xca, 0x39, 0x4b, 0x82, 0xb1, 0xb6, 0x62, 0x29, 0x4a, 0x59, 0xac, 0xdf, 0x01, 0x95, 0xcc,
+ 0x55, 0x81, 0x2b, 0x20, 0x77, 0x42, 0xfa, 0x61, 0x13, 0x43, 0xe2, 0x13, 0xae, 0x81, 0xb9, 0x1e,
+ 0x76, 0x02, 0x12, 0xbe, 0x03, 0x50, 0x38, 0xb8, 0x3d, 0x7b, 0x4b, 0xab, 0xff, 0x43, 0x03, 0xcb,
+ 0x63, 0x2f, 0x26, 0x78, 0x15, 0xe4, 0x02, 0xea, 0xa8, 0x26, 0x58, 0x52, 0xe9, 0xcc, 0x3d, 0x42,
+ 0xdb, 0x48, 0xc8, 0xe1, 0xcf, 0x40, 0x19, 0x5b, 0x16, 0x61, 0x2c, 0x3c, 0x48, 0xaa, 0x5b, 0xbf,
+ 0x77, 0x06, 0xef, 0xa7, 0x84, 0x3f, 0x20, 0xfd, 0x28, 0xc0, 0x30, 0x01, 0xcd, 0xd4, 0x74, 0x34,
+ 0x02, 0x06, 0x6f, 0x8d, 0xa5, 0x2d, 0x27, 0x83, 0x88, 0x2f, 0xff, 0xd9, 0xa9, 0xab, 0xff, 0x35,
+ 0x07, 0x0a, 0x11, 0xa3, 0x79, 0xd9, 0x12, 0xde, 0x05, 0x73, 0xdc, 0xef, 0xda, 0x96, 0x7a, 0x17,
+ 0xc5, 0xac, 0x72, 0x4f, 0x08, 0x51, 0xa8, 0x4b, 0xf3, 0x81, 0xdc, 0x4b, 0xf8, 0xc0, 0x23, 0x90,
+ 0xe3, 0x0e, 0x53, 0x9d, 0xf3, 0xf6, 0x85, 0xeb, 0xed, 0xde, 0x76, 0xf4, 0x38, 0x5e, 0x10, 0x61,
+ 0xee, 0x6d, 0xb7, 0x91, 0xc0, 0x83, 0x9f, 0x80, 0x3c, 0xc3, 0xcc, 0x51, 0x5d, 0xee, 0xc7, 0x17,
+ 0x27, 0x5c, 0xcd, 0xf6, 0x76, 0xfa, 0xd5, 0x2d, 0xc6, 0x48, 0x42, 0xc2, 0xdf, 0x69, 0x60, 0xd1,
+ 0xf2, 0x3d, 0x16, 0xb8, 0x84, 0xde, 0xa5, 0x7e, 0xd0, 0x55, 0xdd, 0x6a, 0x67, 0x6a, 0x42, 0xb9,
+ 0x91, 0x46, 0x35, 0x2b, 0xc3, 0x41, 0x6d, 0x71, 0x44, 0x84, 0x46, 0xfd, 0xd6, 0xff, 0xae, 0x01,
+ 0x98, 0x9d, 0x08, 0x1b, 0xa0, 0xd8, 0x11, 0x1f, 0xf2, 0x66, 0x87, 0xfb, 0x18, 0xbf, 0x7a, 0xef,
+ 0x46, 0x0a, 0x94, 0xd8, 0x88, 0x72, 0x46, 0xc9, 0x3e, 0x76, 0x70, 0xaa, 0x57, 0xaa, 0xfd, 0x8d,
+ 0xcb, 0x19, 0x1a, 0x37, 0x40, 0xd9, 0x39, 0xf0, 0xfb, 0xa0, 0x24, 0xaf, 0xf1, 0x43, 0xe7, 0x80,
+ 0xb0, 0xf0, 0x59, 0x5b, 0x48, 0xba, 0x44, 0x3b, 0x51, 0xa1, 0xb4, 0x5d, 0xfd, 0xbf, 0x1a, 0x58,
+ 0x50, 0x8f, 0x05, 0xe8, 0x81, 0x79, 0x0f, 0x73, 0xbb, 0x47, 0x14, 0x57, 0x9e, 0xea, 0x79, 0xb7,
+ 0x23, 0x91, 0xe2, 0xf6, 0x0f, 0x04, 0x97, 0x0d, 0x65, 0x48, 0x79, 0x81, 0xc7, 0x60, 0x9e, 0x9c,
+ 0xfa, 0xdc, 0x8e, 0x1e, 0xaf, 0x97, 0xf5, 0x5b, 0x8d, 0xf4, 0xb5, 0x29, 0x91, 0x91, 0xf2, 0x50,
+ 0xff, 0x5a, 0x03, 0x20, 0x31, 0x79, 0xd9, 0x4d, 0x7b, 0x1f, 0x14, 0x2d, 0x27, 0x60, 0x9c, 0xd0,
+ 0xad, 0x8f, 0xa2, 0xdb, 0x26, 0xb6, 0x70, 0x23, 0x12, 0xa2, 0x44, 0x0f, 0x3f, 0x00, 0x79, 0x1c,
+ 0xf0, 0x23, 0x75, 0xdd, 0x74, 0x71, 0x64, 0x9b, 0x01, 0x3f, 0x7a, 0x2e, 0x4a, 0x46, 0xc0, 0x8f,
+ 0xe2, 0x4d, 0x93, 0x56, 0x99, 0x3a, 0x94, 0xbf, 0xc4, 0x3a, 0x54, 0xff, 0x7c, 0x19, 0x2c, 0x8d,
+ 0x26, 0x1e, 0x7e, 0x90, 0x22, 0xfd, 0x9a, 0x6c, 0x73, 0xf1, 0xf3, 0x77, 0x02, 0xf1, 0x8f, 0xd6,
+ 0x32, 0x7b, 0xae, 0xb5, 0x8c, 0x53, 0xc7, 0xdc, 0x9b, 0xa0, 0x8e, 0x93, 0xdf, 0x2a, 0xf9, 0x37,
+ 0xfb, 0x56, 0xf9, 0xe6, 0xd0, 0xff, 0x3f, 0x8e, 0x93, 0xe2, 0x79, 0x49, 0xde, 0x3e, 0xbb, 0xbc,
+ 0xbb, 0x7f, 0x39, 0xb4, 0x78, 0xe1, 0x92, 0x68, 0x71, 0xfa, 0xa5, 0x51, 0x78, 0x5d, 0x2f, 0x8d,
+ 0x09, 0xdc, 0xbb, 0xf8, 0x1a, 0xb8, 0x77, 0x1d, 0xcc, 0xbb, 0xf8, 0xb4, 0xd9, 0x21, 0x92, 0xd9,
+ 0x17, 0xc3, 0xc2, 0xd7, 0x92, 0x12, 0xa4, 0x34, 0xff, 0x77, 0x7e, 0x3e, 0x99, 0xe4, 0x96, 0x5f,
+ 0x89, 0xe4, 0x4e, 0xe4, 0xfa, 0x8b, 0x53, 0x72, 0xfd, 0xa5, 0x73, 0x73, 0xfd, 0xe5, 0x29, 0xb8,
+ 0xfe, 0x7b, 0x60, 0xc1, 0xc5, 0xa7, 0x2d, 0xa6, 0xe8, 0x79, 0xde, 0x2c, 0x09, 0x0a, 0xd6, 0x0a,
+ 0x45, 0x28, 0xd2, 0x89, 0xc0, 0x5c, 0x7c, 0x6a, 0xf6, 0x39, 0x11, 0xdc, 0x3c, 0xa6, 0xf1, 0x2d,
+ 0x25, 0x43, 0xb1, 0x56, 0x01, 0xb6, 0x83, 0x7d, 0x26, 0x49, 0x79, 0x02, 0x28, 0x44, 0x28, 0xd2,
+ 0x5d, 0x94, 0x8a, 0xc3, 0x6d, 0xb0, 0x46, 0xf1, 0x21, 0xbf, 0x47, 0x30, 0xe5, 0xfb, 0x04, 0xf3,
+ 0x3d, 0xdb, 0x25, 0x7e, 0xc0, 0xf5, 0xb5, 0xb8, 0x01, 0xac, 0xa1, 0x09, 0x7a, 0x34, 0x71, 0x16,
+ 0xdc, 0x02, 0xab, 0x42, 0xbe, 0x29, 0xae, 0xb0, 0xed, 0x7b, 0x11, 0xd8, 0x5b, 0x12, 0xec, 0xed,
+ 0xe1, 0xa0, 0xb6, 0x8a, 0xb2, 0x6a, 0x34, 0x69, 0x0e, 0xfc, 0x09, 0x58, 0x11, 0xe2, 0x6d, 0x82,
+ 0x19, 0x89, 0x70, 0xbe, 0x15, 0xd2, 0x6a, 0x71, 0x12, 0xd1, 0x98, 0x0e, 0x65, 0xac, 0xe1, 0x06,
+ 0xa8, 0x08, 0xd9, 0x86, 0xef, 0xba, 0x76, 0xbc, 0xae, 0xb7, 0x25, 0x84, 0x2c, 0xe4, 0x68, 0x5c,
+ 0x89, 0xb2, 0xf6, 0xd3, 0x3f, 0x55, 0xfe, 0x34, 0x0b, 0x56, 0x27, 0x34, 0x35, 0xb1, 0x3e, 0xc6,
+ 0x7d, 0x8a, 0x3b, 0x24, 0x39, 0xda, 0x5a, 0xb2, 0xbe, 0xf6, 0x98, 0x0e, 0x65, 0xac, 0xe1, 0x13,
+ 0x00, 0xc2, 0xe6, 0xdf, 0xf2, 0x0f, 0x94, 0x63, 0xf3, 0x8e, 0xd8, 0xea, 0x66, 0x2c, 0x7d, 0x3e,
+ 0xa8, 0xdd, 0x98, 0xf4, 0x17, 0x49, 0x14, 0x0f, 0x7f, 0xec, 0x3b, 0x81, 0x4b, 0x92, 0x09, 0x28,
+ 0x05, 0x09, 0x7f, 0x0e, 0x40, 0x4f, 0xea, 0xdb, 0xf6, 0xaf, 0xa2, 0xe6, 0xfe, 0xc2, 0xdf, 0xda,
+ 0x8d, 0xe8, 0xdf, 0x1c, 0xe3, 0xa7, 0x01, 0xf6, 0xb8, 0xb8, 0x1f, 0xf2, 0xec, 0x3d, 0x8e, 0x51,
+ 0x50, 0x0a, 0xd1, 0x34, 0x9e, 0x3e, 0xab, 0xce, 0x7c, 0xf1, 0xac, 0x3a, 0xf3, 0xe5, 0xb3, 0xea,
+ 0xcc, 0xaf, 0x87, 0x55, 0xed, 0xe9, 0xb0, 0xaa, 0x7d, 0x31, 0xac, 0x6a, 0x5f, 0x0e, 0xab, 0xda,
+ 0x57, 0xc3, 0xaa, 0xf6, 0xf9, 0xd7, 0xd5, 0x99, 0x4f, 0x0b, 0x51, 0x5b, 0xf9, 0x5f, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0xdf, 0x1b, 0x03, 0x56, 0xd9, 0x1d, 0x00, 0x00,
}
func (m *BusConfig) Marshal() (dAtA []byte, err error) {
@@ -542,6 +613,18 @@ func (m *BusConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Kafka != nil {
+ {
+ size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.JetStream != nil {
{
size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i])
@@ -739,6 +822,18 @@ func (m *EventBusSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Kafka != nil {
+ {
+ size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.JetStream != nil {
{
size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i])
@@ -1076,6 +1171,121 @@ func (m *JetStreamConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *KafkaBus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KafkaBus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KafkaBus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConsumerGroup != nil {
+ {
+ size, err := m.ConsumerGroup.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.SASL != nil {
+ {
+ size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.TLS != nil {
+ {
+ size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Topic)
+ copy(dAtA[i:], m.Topic)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.URL)
+ copy(dAtA[i:], m.URL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *KafkaConsumerGroup) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KafkaConsumerGroup) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KafkaConsumerGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.StartOldest {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.RebalanceStrategy)
+ copy(dAtA[i:], m.RebalanceStrategy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RebalanceStrategy)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.GroupName)
+ copy(dAtA[i:], m.GroupName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *NATSBus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1498,6 +1708,10 @@ func (m *BusConfig) Size() (n int) {
l = m.JetStream.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Kafka != nil {
+ l = m.Kafka.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -1564,6 +1778,10 @@ func (m *EventBusSpec) Size() (n int) {
l = m.JetStream.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Kafka != nil {
+ l = m.Kafka.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -1684,6 +1902,47 @@ func (m *JetStreamConfig) Size() (n int) {
return n
}
+func (m *KafkaBus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.URL)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Topic)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.TLS != nil {
+ l = m.TLS.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SASL != nil {
+ l = m.SASL.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ConsumerGroup != nil {
+ l = m.ConsumerGroup.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *KafkaConsumerGroup) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.GroupName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.RebalanceStrategy)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
func (m *NATSBus) Size() (n int) {
if m == nil {
return 0
@@ -1857,6 +2116,7 @@ func (this *BusConfig) String() string {
s := strings.Join([]string{`&BusConfig{`,
`NATS:` + strings.Replace(this.NATS.String(), "NATSConfig", "NATSConfig", 1) + `,`,
`JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamConfig", "JetStreamConfig", 1) + `,`,
+ `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaBus", "KafkaBus", 1) + `,`,
`}`,
}, "")
return s
@@ -1908,6 +2168,7 @@ func (this *EventBusSpec) String() string {
s := strings.Join([]string{`&EventBusSpec{`,
`NATS:` + strings.Replace(this.NATS.String(), "NATSBus", "NATSBus", 1) + `,`,
`JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamBus", "JetStreamBus", 1) + `,`,
+ `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaBus", "KafkaBus", 1) + `,`,
`}`,
}, "")
return s
@@ -1983,6 +2244,33 @@ func (this *JetStreamConfig) String() string {
}, "")
return s
}
+func (this *KafkaBus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KafkaBus{`,
+ `URL:` + fmt.Sprintf("%v", this.URL) + `,`,
+ `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`,
+ `SASL:` + strings.Replace(fmt.Sprintf("%v", this.SASL), "SASLConfig", "common.SASLConfig", 1) + `,`,
+ `ConsumerGroup:` + strings.Replace(this.ConsumerGroup.String(), "KafkaConsumerGroup", "KafkaConsumerGroup", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *KafkaConsumerGroup) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&KafkaConsumerGroup{`,
+ `GroupName:` + fmt.Sprintf("%v", this.GroupName) + `,`,
+ `RebalanceStrategy:` + fmt.Sprintf("%v", this.RebalanceStrategy) + `,`,
+ `StartOldest:` + fmt.Sprintf("%v", this.StartOldest) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *NATSBus) String() string {
if this == nil {
return "nil"
@@ -2180,6 +2468,42 @@ func (m *BusConfig) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kafka == nil {
+ m.Kafka = &KafkaBus{}
+ }
+ if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2719,17 +3043,53 @@ func (m *EventBusSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType)
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kafka == nil {
+ m.Kafka = &KafkaBus{}
+ }
+ if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
@@ -3770,6 +4130,394 @@ func (m *JetStreamConfig) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *KafkaBus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KafkaBus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KafkaBus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URL = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Topic = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TLS == nil {
+ m.TLS = &common.TLSConfig{}
+ }
+ if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SASL == nil {
+ m.SASL = &common.SASLConfig{}
+ }
+ if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroup", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsumerGroup == nil {
+ m.ConsumerGroup = &KafkaConsumerGroup{}
+ }
+ if err := m.ConsumerGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KafkaConsumerGroup: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KafkaConsumerGroup: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RebalanceStrategy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RebalanceStrategy = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartOldest", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.StartOldest = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *NATSBus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/pkg/apis/eventbus/v1alpha1/generated.proto b/pkg/apis/eventbus/v1alpha1/generated.proto
index e0d03920f5..7320044cca 100644
--- a/pkg/apis/eventbus/v1alpha1/generated.proto
+++ b/pkg/apis/eventbus/v1alpha1/generated.proto
@@ -37,6 +37,9 @@ message BusConfig {
// +optional
optional JetStreamConfig jetstream = 2;
+
+ // +optional
+ optional KafkaBus kafka = 3;
}
// ContainerTemplate defines customized spec for a container
@@ -79,6 +82,10 @@ message EventBusSpec {
// +optional
optional JetStreamBus jetstream = 2;
+
+ // +optional
+ // Kafka eventbus
+ optional KafkaBus kafka = 3;
}
// EventBusStatus holds the status of the eventbus resource
@@ -201,6 +208,46 @@ message JetStreamConfig {
optional string streamConfig = 3;
}
+// KafkaBus holds the KafkaBus EventBus information
+message KafkaBus {
+ // URL to kafka cluster, multiple URLs separated by comma
+ optional string url = 1;
+
+ // Topic name, defaults to {namespace_name}-{eventbus_name}
+ // +optional
+ optional string topic = 2;
+
+ // Kafka version, sarama defaults to the oldest supported stable version
+ // +optional
+ optional string version = 3;
+
+ // TLS configuration for the kafka client.
+ // +optional
+ optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 4;
+
+ // SASL configuration for the kafka client
+ // +optional
+ optional github.com.argoproj.argo_events.pkg.apis.common.SASLConfig sasl = 5;
+
+ // Consumer group for kafka client
+ // +optional
+ optional KafkaConsumerGroup consumerGroup = 6;
+}
+
+message KafkaConsumerGroup {
+ // Consumer group name, defaults to {namespace_name}-{sensor_name}
+ // +optional
+ optional string groupName = 1;
+
+ // Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.
+ // +optional
+ optional string rebalanceStrategy = 2;
+
+ // When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false
+ // +optional
+ optional bool startOldest = 3;
+}
+
// NATSBus holds the NATS eventbus information
message NATSBus {
// Native means to bring up a native NATS service
diff --git a/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go b/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go
new file mode 100644
index 0000000000..fa4f754963
--- /dev/null
+++ b/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go
@@ -0,0 +1,38 @@
+package v1alpha1
+
+import (
+ apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+)
+
+// KafkaBus holds the KafkaBus EventBus information
+type KafkaBus struct {
+ // URL to kafka cluster, multiple URLs separated by comma
+ URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"`
+ // Topic name, defaults to {namespace_name}-{eventbus_name}
+ // +optional
+ Topic string `json:"topic,omitempty" protobuf:"bytes,2,opt,name=topic"`
+ // Kafka version, sarama defaults to the oldest supported stable version
+ // +optional
+ Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+ // TLS configuration for the kafka client.
+ // +optional
+ TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,4,opt,name=tls"`
+ // SASL configuration for the kafka client
+ // +optional
+ SASL *apicommon.SASLConfig `json:"sasl,omitempty" protobuf:"bytes,5,opt,name=sasl"`
+ // Consumer group for kafka client
+ // +optional
+ ConsumerGroup *KafkaConsumerGroup `json:"consumerGroup,omitempty" protobuf:"bytes,6,opt,name=consumerGroup"`
+}
+
+type KafkaConsumerGroup struct {
+ // Consumer group name, defaults to {namespace_name}-{sensor_name}
+ // +optional
+ GroupName string `json:"groupName,omitempty" protobuf:"bytes,1,opt,name=groupName"`
+ // Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.
+ // +optional
+ RebalanceStrategy string `json:"rebalanceStrategy,omitempty" protobuf:"bytes,2,opt,name=rebalanceStrategy"`
+ // When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false
+ // +optional
+ StartOldest bool `json:"startOldest,omitempty" default:"false" protobuf:"bytes,3,opt,name=startOldest"`
+}
diff --git a/pkg/apis/eventbus/v1alpha1/openapi_generated.go b/pkg/apis/eventbus/v1alpha1/openapi_generated.go
index b6a1a9f68a..cb16259563 100644
--- a/pkg/apis/eventbus/v1alpha1/openapi_generated.go
+++ b/pkg/apis/eventbus/v1alpha1/openapi_generated.go
@@ -38,6 +38,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusStatus": schema_pkg_apis_eventbus_v1alpha1_EventBusStatus(ref),
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus": schema_pkg_apis_eventbus_v1alpha1_JetStreamBus(ref),
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig": schema_pkg_apis_eventbus_v1alpha1_JetStreamConfig(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus": schema_pkg_apis_eventbus_v1alpha1_KafkaBus(ref),
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup": schema_pkg_apis_eventbus_v1alpha1_KafkaConsumerGroup(ref),
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus": schema_pkg_apis_eventbus_v1alpha1_NATSBus(ref),
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig": schema_pkg_apis_eventbus_v1alpha1_NATSConfig(ref),
"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NativeStrategy": schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref),
@@ -62,11 +64,16 @@ func schema_pkg_apis_eventbus_v1alpha1_BusConfig(ref common.ReferenceCallback) c
Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig"),
},
},
+ "kafka": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus"),
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig"},
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig"},
}
}
@@ -217,11 +224,17 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusSpec(ref common.ReferenceCallback
Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus"),
},
},
+ "kafka": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kafka eventbus",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus"),
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus"},
+ "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus"},
}
}
@@ -477,6 +490,93 @@ func schema_pkg_apis_eventbus_v1alpha1_JetStreamConfig(ref common.ReferenceCallb
}
}
+func schema_pkg_apis_eventbus_v1alpha1_KafkaBus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "KafkaBus holds the KafkaBus EventBus information",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL to kafka cluster, multiple URLs separated by comma",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "topic": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Topic name, defaults to {namespace_name}-{eventbus_name}",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "version": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kafka version, sarama defaults to the oldest supported stable version",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tls": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TLS configuration for the kafka client.",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"),
+ },
+ },
+ "sasl": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SASL configuration for the kafka client",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.SASLConfig"),
+ },
+ },
+ "consumerGroup": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Consumer group for kafka client",
+ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup"},
+ }
+}
+
+func schema_pkg_apis_eventbus_v1alpha1_KafkaConsumerGroup(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "groupName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Consumer group name, defaults to {namespace_name}-{sensor_name}",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "rebalanceStrategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "startOldest": {
+ SchemaProps: spec.SchemaProps{
+ Description: "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
func schema_pkg_apis_eventbus_v1alpha1_NATSBus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
diff --git a/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go
index 68524f4cd5..61ce1b4ca9 100644
--- a/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go
@@ -40,6 +40,11 @@ func (in *BusConfig) DeepCopyInto(out *BusConfig) {
*out = new(JetStreamConfig)
(*in).DeepCopyInto(*out)
}
+ if in.Kafka != nil {
+ in, out := &in.Kafka, &out.Kafka
+ *out = new(KafkaBus)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -149,6 +154,11 @@ func (in *EventBusSpec) DeepCopyInto(out *EventBusSpec) {
*out = new(JetStreamBus)
(*in).DeepCopyInto(*out)
}
+ if in.Kafka != nil {
+ in, out := &in.Kafka, &out.Kafka
+ *out = new(KafkaBus)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -301,6 +311,53 @@ func (in *JetStreamConfig) DeepCopy() *JetStreamConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaBus) DeepCopyInto(out *KafkaBus) {
+ *out = *in
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(common.TLSConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SASL != nil {
+ in, out := &in.SASL, &out.SASL
+ *out = new(common.SASLConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ConsumerGroup != nil {
+ in, out := &in.ConsumerGroup, &out.ConsumerGroup
+ *out = new(KafkaConsumerGroup)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBus.
+func (in *KafkaBus) DeepCopy() *KafkaBus {
+ if in == nil {
+ return nil
+ }
+ out := new(KafkaBus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KafkaConsumerGroup) DeepCopyInto(out *KafkaConsumerGroup) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConsumerGroup.
+func (in *KafkaConsumerGroup) DeepCopy() *KafkaConsumerGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(KafkaConsumerGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NATSBus) DeepCopyInto(out *NATSBus) {
*out = *in
diff --git a/pkg/apis/events/event-data.go b/pkg/apis/events/event-data.go
index 6cc09acf21..0d283b8c9c 100644
--- a/pkg/apis/events/event-data.go
+++ b/pkg/apis/events/event-data.go
@@ -202,6 +202,8 @@ type GitLabEventData struct {
type KafkaEventData struct {
// Topic refers to the Kafka topic
Topic string `json:"topic"`
+ // Key refers to the Kafka key
+ Key string `json:"key"`
// Partition refers to the Kafka partition
Partition int `json:"partition"`
// Body refers to the message value
diff --git a/sensors/listener.go b/sensors/listener.go
index 300ba3af2a..1fc1027c99 100644
--- a/sensors/listener.go
+++ b/sensors/listener.go
@@ -59,6 +59,12 @@ func (sensorCtx *SensorContext) Start(ctx context.Context) error {
replicas := int(sensorCtx.sensor.Spec.GetReplicas())
leasename := fmt.Sprintf("sensor-%s", sensorCtx.sensor.Name)
+ // sensor for kafka eventbus can be scaled horizontally,
+ // therefore does not require an elector
+ if sensorCtx.eventBusConfig.Kafka != nil {
+ return sensorCtx.listenEvents(ctx)
+ }
+
elector, err := leaderelection.NewElector(ctx, *sensorCtx.eventBusConfig, clusterName, replicas, sensorCtx.sensor.Namespace, leasename, sensorCtx.hostname)
if err != nil {
log.Errorw("failed to get an elector", zap.Error(err))
@@ -106,7 +112,7 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- ebDriver, err := eventbus.GetSensorDriver(logging.WithLogger(ctx, logger), *sensorCtx.eventBusConfig, sensorCtx.sensor)
+ ebDriver, err := eventbus.GetSensorDriver(logging.WithLogger(ctx, logger), *sensorCtx.eventBusConfig, sensorCtx.sensor, sensorCtx.hostname)
if err != nil {
return err
}
@@ -156,7 +162,7 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error {
var conn eventbuscommon.TriggerConnection
err = common.DoWithRetry(&common.DefaultBackoff, func() error {
var err error
- conn, err = ebDriver.Connect(trigger.Template.Name, depExpression, deps)
+ conn, err = ebDriver.Connect(ctx, trigger.Template.Name, depExpression, deps, trigger.AtLeastOnce)
triggerLogger.Debugf("just created connection %v, %+v", &conn, conn)
return err
})
@@ -293,13 +299,13 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error {
return
case <-ticker.C:
if conn == nil || conn.IsClosed() {
- triggerLogger.Info("NATS connection lost, reconnecting...")
- conn, err = ebDriver.Connect(trigger.Template.Name, depExpression, deps)
+ triggerLogger.Info("EventBus connection lost, reconnecting...")
+ conn, err = ebDriver.Connect(ctx, trigger.Template.Name, depExpression, deps, trigger.AtLeastOnce)
if err != nil {
triggerLogger.Errorw("failed to reconnect to eventbus", zap.Any("connection", conn), zap.Error(err))
continue
}
- triggerLogger.Infow("reconnected to NATS server.", zap.Any("connection", conn))
+ triggerLogger.Infow("reconnected to EventBus.", zap.Any("connection", conn))
if atomic.LoadUint32(&subLock) == 1 {
triggerLogger.Debug("acquired sublock, instructing trigger to shutdown subscription")
diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go
index 457ee3ec0d..ea5f78f410 100644
--- a/test/e2e/fixtures/e2e_suite.go
+++ b/test/e2e/fixtures/e2e_suite.go
@@ -52,6 +52,14 @@ metadata:
spec:
jetstream:
version: latest`
+
+ E2EEventBusKafka = `apiVersion: argoproj.io/v1alpha1
+kind: EventBus
+metadata:
+ name: default
+spec:
+ kafka:
+ url: kafka:9092`
)
type E2ESuite struct {
@@ -170,6 +178,8 @@ func GetBusDriverSpec() string {
x := strings.ToUpper(os.Getenv("EventBusDriver"))
if x == "JETSTREAM" {
return E2EEventBusJetstream
+ } else if x == "KAFKA" {
+ return E2EEventBusKafka
}
return E2EEventBusSTAN
}
diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go
index c77fe02b9d..9be842f216 100644
--- a/test/e2e/fixtures/when.go
+++ b/test/e2e/fixtures/when.go
@@ -153,8 +153,10 @@ func (w *When) WaitForEventBusReady() *When {
if err := testutil.WaitForEventBusReady(ctx, w.eventBusClient, w.eventBus.Name, defaultTimeout); err != nil {
w.t.Fatal(err)
}
- if err := testutil.WaitForEventBusStatefulSetReady(ctx, w.kubeClient, Namespace, w.eventBus.Name, 2*time.Minute); err != nil {
- w.t.Fatal(err)
+ if w.eventBus.Spec.Kafka == nil { // not needed for kafka (exotic only)
+ if err := testutil.WaitForEventBusStatefulSetReady(ctx, w.kubeClient, Namespace, w.eventBus.Name, 2*time.Minute); err != nil {
+ w.t.Fatal(err)
+ }
}
return w
}
diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go
index 9046fa8093..3a549ca1f1 100644
--- a/test/e2e/functional_test.go
+++ b/test/e2e/functional_test.go
@@ -407,11 +407,10 @@ func (s *FunctionalSuite) TestAtLeastOnce() {
// Send an event to a sensor with a failing trigger and make sure it doesn't ACK it.
// Delete the sensor and launch sensor with same name and non-failing trigger so it ACKS it.
- // Start EventSource
-
if fixtures.GetBusDriverSpec() == fixtures.E2EEventBusSTAN {
s.T().SkipNow() // Skipping because AtLeastOnce does not apply for NATS.
}
+
w1 := s.Given().EventSource("@testdata/es-webhook.yaml").
When().
CreateEventSource().
@@ -425,7 +424,7 @@ func (s *FunctionalSuite) TestAtLeastOnce() {
defer w1.Then().EventSourcePodPortForward(12006, 12000).
TerminateAllPodPortForwards()
- w2 := s.Given().Sensor("@testdata/sensor-atleastonce-failing.yaml").
+ w2 := s.Given().Sensor("@testdata/sensor-atleastonce-fail.yaml").
When().
CreateSensor().
WaitForSensorReady()
@@ -437,11 +436,13 @@ func (s *FunctionalSuite) TestAtLeastOnce() {
Status(200)
w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1))
- w2.Then().ExpectSensorPodLogContains("InProgess")
+ w2.Then().ExpectSensorPodLogContains("Making a http request...")
+	time.Sleep(5 * time.Second) // make sure we definitely attempt to trigger
+
w2.DeleteSensor()
time.Sleep(10 * time.Second)
- w3 := s.Given().Sensor("@testdata/sensor-atleastonce-triggerable.yaml").
+ w3 := s.Given().Sensor("@testdata/sensor-atleastonce-succeed.yaml").
When().
CreateSensor().
WaitForSensorReady()
@@ -459,7 +460,6 @@ func (s *FunctionalSuite) TestAtMostOnce() {
// Delete the sensor and launch sensor with same name and non-failing trigger
// to see that the event doesn't come through.
- // Start EventSource
w1 := s.Given().EventSource("@testdata/es-webhook.yaml").
When().
CreateEventSource().
@@ -471,7 +471,7 @@ func (s *FunctionalSuite) TestAtMostOnce() {
defer w1.Then().EventSourcePodPortForward(12007, 12000).
TerminateAllPodPortForwards()
- w2 := s.Given().Sensor("@testdata/sensor-atmostonce-failing.yaml").
+ w2 := s.Given().Sensor("@testdata/sensor-atmostonce-fail.yaml").
When().
CreateSensor().
WaitForSensorReady()
@@ -483,13 +483,13 @@ func (s *FunctionalSuite) TestAtMostOnce() {
Status(200)
w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1))
- w2.Then().ExpectSensorPodLogContains("Triggering actions")
- time.Sleep(3 * time.Second)
+ w2.Then().ExpectSensorPodLogContains("Making a http request...")
+	time.Sleep(5 * time.Second) // make sure we definitely attempt to trigger
w2.DeleteSensor()
time.Sleep(10 * time.Second)
- w3 := s.Given().Sensor("@testdata/sensor-atmostonce-triggerable.yaml").
+ w3 := s.Given().Sensor("@testdata/sensor-atmostonce-succeed.yaml").
When().
CreateSensor().
WaitForSensorReady()
@@ -507,8 +507,6 @@ func (s *FunctionalSuite) TestMultipleSensorAtLeastOnceTrigger() {
// Then send the other part of the condition and verify that only one triggers
// With AtLeastOnce flag set.
- // Start EventSource
-
w1 := s.Given().EventSource("@testdata/es-multi-sensor.yaml").
When().
CreateEventSource().
@@ -562,7 +560,6 @@ func (s *FunctionalSuite) TestMultipleSensorAtLeastOnceTrigger() {
// Verify trigger occurs for first Sensor and not second
w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1-atleastonce"))
w3.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1-atleastonce"), util.PodLogCheckOptionWithCount(0))
-
}
func (s *FunctionalSuite) TestTriggerSpecChange() {
diff --git a/test/e2e/testdata/es-calendar-ha.yaml b/test/e2e/testdata/es-calendar-ha.yaml
index 4098eb32ce..11b3e1e731 100644
--- a/test/e2e/testdata/es-calendar-ha.yaml
+++ b/test/e2e/testdata/es-calendar-ha.yaml
@@ -8,4 +8,4 @@ spec:
serviceAccountName: argo-events-sa
calendar:
example:
- interval: 2s
+ interval: 10s
diff --git a/test/e2e/testdata/es-calendar-metrics.yaml b/test/e2e/testdata/es-calendar-metrics.yaml
index de65f53fca..9e8ce5331e 100644
--- a/test/e2e/testdata/es-calendar-metrics.yaml
+++ b/test/e2e/testdata/es-calendar-metrics.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-calendar-metrics
spec:
+ template:
+ serviceAccountName: argo-events-sa
calendar:
example:
- interval: 2s
+ interval: 10s
diff --git a/test/e2e/testdata/es-calendar.yaml b/test/e2e/testdata/es-calendar.yaml
index 17c9e4f364..f4c3365635 100644
--- a/test/e2e/testdata/es-calendar.yaml
+++ b/test/e2e/testdata/es-calendar.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-calendar
spec:
+ template:
+ serviceAccountName: argo-events-sa
calendar:
example:
- interval: 2s
+ interval: 10s
diff --git a/test/e2e/testdata/es-durable-consumer.yaml b/test/e2e/testdata/es-durable-consumer.yaml
index f7a1dd877e..01c0790514 100644
--- a/test/e2e/testdata/es-durable-consumer.yaml
+++ b/test/e2e/testdata/es-durable-consumer.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-durable-consumer
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example1:
port: "12000"
diff --git a/test/e2e/testdata/es-multi-dep.yaml b/test/e2e/testdata/es-multi-dep.yaml
index 6256d99fbe..b62e7208fc 100644
--- a/test/e2e/testdata/es-multi-dep.yaml
+++ b/test/e2e/testdata/es-multi-dep.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-multi-dep
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example1:
port: "12000"
diff --git a/test/e2e/testdata/es-multi-sensor.yaml b/test/e2e/testdata/es-multi-sensor.yaml
index 9b3ffdbc9c..f6a26eda40 100644
--- a/test/e2e/testdata/es-multi-sensor.yaml
+++ b/test/e2e/testdata/es-multi-sensor.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-multi-sensor
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example1:
port: "12000"
diff --git a/test/e2e/testdata/es-test-metrics-webhook.yaml b/test/e2e/testdata/es-test-metrics-webhook.yaml
index 5c4e970ab5..8dbdc4ee2f 100644
--- a/test/e2e/testdata/es-test-metrics-webhook.yaml
+++ b/test/e2e/testdata/es-test-metrics-webhook.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-test-metrics-webhook
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example:
port: "12000"
diff --git a/test/e2e/testdata/es-trigger-spec-change.yaml b/test/e2e/testdata/es-trigger-spec-change.yaml
index a9083bf719..03ad5070d8 100644
--- a/test/e2e/testdata/es-trigger-spec-change.yaml
+++ b/test/e2e/testdata/es-trigger-spec-change.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-trigger-spec-change
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example1:
port: "12000"
diff --git a/test/e2e/testdata/es-webhook.yaml b/test/e2e/testdata/es-webhook.yaml
index 257d0496ce..d17b00971e 100644
--- a/test/e2e/testdata/es-webhook.yaml
+++ b/test/e2e/testdata/es-webhook.yaml
@@ -3,6 +3,8 @@ kind: EventSource
metadata:
name: e2e-webhook
spec:
+ template:
+ serviceAccountName: argo-events-sa
webhook:
example:
port: "12000"
diff --git a/test/e2e/testdata/sensor-atleastonce-failing.yaml b/test/e2e/testdata/sensor-atleastonce-fail.yaml
similarity index 93%
rename from test/e2e/testdata/sensor-atleastonce-failing.yaml
rename to test/e2e/testdata/sensor-atleastonce-fail.yaml
index 9cbc030306..15a2fffb23 100644
--- a/test/e2e/testdata/sensor-atleastonce-failing.yaml
+++ b/test/e2e/testdata/sensor-atleastonce-fail.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-atleastonce-triggerable.yaml b/test/e2e/testdata/sensor-atleastonce-succeed.yaml
similarity index 91%
rename from test/e2e/testdata/sensor-atleastonce-triggerable.yaml
rename to test/e2e/testdata/sensor-atleastonce-succeed.yaml
index 89ae305c1c..a20a498533 100644
--- a/test/e2e/testdata/sensor-atleastonce-triggerable.yaml
+++ b/test/e2e/testdata/sensor-atleastonce-succeed.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-atmostonce-failing.yaml b/test/e2e/testdata/sensor-atmostonce-fail.yaml
similarity index 93%
rename from test/e2e/testdata/sensor-atmostonce-failing.yaml
rename to test/e2e/testdata/sensor-atmostonce-fail.yaml
index 51c9914587..bbfb3d7943 100644
--- a/test/e2e/testdata/sensor-atmostonce-failing.yaml
+++ b/test/e2e/testdata/sensor-atmostonce-fail.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-atmostonce-triggerable.yaml b/test/e2e/testdata/sensor-atmostonce-succeed.yaml
similarity index 90%
rename from test/e2e/testdata/sensor-atmostonce-triggerable.yaml
rename to test/e2e/testdata/sensor-atmostonce-succeed.yaml
index f8857bf7c4..93a4da0d95 100644
--- a/test/e2e/testdata/sensor-atmostonce-triggerable.yaml
+++ b/test/e2e/testdata/sensor-atmostonce-succeed.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-durable-consumer.yaml b/test/e2e/testdata/sensor-durable-consumer.yaml
index 16138237bd..d29c0063f1 100644
--- a/test/e2e/testdata/sensor-durable-consumer.yaml
+++ b/test/e2e/testdata/sensor-durable-consumer.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-log-ha.yaml b/test/e2e/testdata/sensor-log-ha.yaml
index f6efabe2be..d52ee3d832 100644
--- a/test/e2e/testdata/sensor-log-ha.yaml
+++ b/test/e2e/testdata/sensor-log-ha.yaml
@@ -4,6 +4,8 @@ metadata:
name: e2e-log-ha
spec:
replicas: 2
+ template:
+ serviceAccountName: argo-events-sa
dependencies:
- name: test-dep
eventSourceName: e2e-calendar-ha
diff --git a/test/e2e/testdata/sensor-log-metrics.yaml b/test/e2e/testdata/sensor-log-metrics.yaml
index ea8b0c946a..0b7d63b548 100644
--- a/test/e2e/testdata/sensor-log-metrics.yaml
+++ b/test/e2e/testdata/sensor-log-metrics.yaml
@@ -3,6 +3,8 @@ kind: Sensor
metadata:
name: e2e-log-metrics
spec:
+ template:
+ serviceAccountName: argo-events-sa
dependencies:
- name: test-dep
eventSourceName: e2e-calendar-metrics
diff --git a/test/e2e/testdata/sensor-log.yaml b/test/e2e/testdata/sensor-log.yaml
index 34ef0943b7..98f77b134e 100644
--- a/test/e2e/testdata/sensor-log.yaml
+++ b/test/e2e/testdata/sensor-log.yaml
@@ -3,6 +3,8 @@ kind: Sensor
metadata:
name: e2e-log
spec:
+ template:
+ serviceAccountName: argo-events-sa
dependencies:
- name: test-dep
eventSourceName: e2e-calendar
diff --git a/test/e2e/testdata/sensor-multi-dep.yaml b/test/e2e/testdata/sensor-multi-dep.yaml
index 73d50034da..9f6fe398fe 100644
--- a/test/e2e/testdata/sensor-multi-dep.yaml
+++ b/test/e2e/testdata/sensor-multi-dep.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml b/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml
index af05b01d9e..a9bea2b912 100644
--- a/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml
+++ b/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-multi-sensor-2.yaml b/test/e2e/testdata/sensor-multi-sensor-2.yaml
index 165baf05d3..7528012b2c 100644
--- a/test/e2e/testdata/sensor-multi-sensor-2.yaml
+++ b/test/e2e/testdata/sensor-multi-sensor-2.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml b/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml
index 2ba2cb436d..ec80de564e 100644
--- a/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml
+++ b/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-multi-sensor.yaml b/test/e2e/testdata/sensor-multi-sensor.yaml
index f3b9e77615..0fa7ca2de0 100644
--- a/test/e2e/testdata/sensor-multi-sensor.yaml
+++ b/test/e2e/testdata/sensor-multi-sensor.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-resource.yaml b/test/e2e/testdata/sensor-resource.yaml
index 94dc13a465..e11f0245cd 100644
--- a/test/e2e/testdata/sensor-resource.yaml
+++ b/test/e2e/testdata/sensor-resource.yaml
@@ -3,6 +3,8 @@ kind: Sensor
metadata:
name: e2e-resource-log
spec:
+ template:
+ serviceAccountName: argo-events-sa
dependencies:
- name: test-dep
eventSourceName: test-resource
diff --git a/test/e2e/testdata/sensor-test-metrics.yaml b/test/e2e/testdata/sensor-test-metrics.yaml
index a0c6b68950..d830759cd1 100644
--- a/test/e2e/testdata/sensor-test-metrics.yaml
+++ b/test/e2e/testdata/sensor-test-metrics.yaml
@@ -3,6 +3,8 @@ kind: Sensor
metadata:
name: e2e-test-metrics
spec:
+ template:
+ serviceAccountName: argo-events-sa
dependencies:
- name: test-dep
eventSourceName: e2e-test-metrics-webhook
diff --git a/test/e2e/testdata/sensor-trigger-spec-change-2.yaml b/test/e2e/testdata/sensor-trigger-spec-change-2.yaml
index f4032c052f..736e4cf6be 100644
--- a/test/e2e/testdata/sensor-trigger-spec-change-2.yaml
+++ b/test/e2e/testdata/sensor-trigger-spec-change-2.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/e2e/testdata/sensor-trigger-spec-change.yaml b/test/e2e/testdata/sensor-trigger-spec-change.yaml
index 9d34202353..c52c0a33d4 100644
--- a/test/e2e/testdata/sensor-trigger-spec-change.yaml
+++ b/test/e2e/testdata/sensor-trigger-spec-change.yaml
@@ -5,6 +5,7 @@ metadata:
spec:
replicas: 1
template:
+ serviceAccountName: argo-events-sa
container:
env:
- name: DEBUG_LOG
diff --git a/test/manifests/kafka/kafka.yaml b/test/manifests/kafka/kafka.yaml
new file mode 100644
index 0000000000..4c38956775
--- /dev/null
+++ b/test/manifests/kafka/kafka.yaml
@@ -0,0 +1,54 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app: kafka
+ name: kafka
+spec:
+ serviceName: kafka
+ replicas: 1
+ selector:
+ matchLabels:
+ app: kafka
+ template:
+ metadata:
+ labels:
+ app: kafka
+ spec:
+ containers:
+ - env:
+ - name: KAFKA_ADVERTISED_PORT
+ value: "9092"
+ - name: KAFKA_ADVERTISED_HOST_NAME
+ value: "kafka"
+ - name: KAFKA_BROKER_ID
+ value: "0"
+ - name: KAFKA_PORT
+ value: "9092"
+ - name: KAFKA_ZOOKEEPER_CONNECT
+ value: "zookeeper:2181"
+ - name: KAFKA_LISTENERS
+ value: "INSIDE://:9092"
+ - name: KAFKA_ADVERTISED_LISTENERS
+ value: "INSIDE://:9092"
+ - name: KAFKA_INTER_BROKER_LISTENER_NAME
+ value: "INSIDE"
+ - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
+ value: "INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT"
+ image: wurstmeister/kafka
+ name: kafka
+ ports:
+ - containerPort: 9092
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: kafka
+ name: kafka
+spec:
+ type: NodePort
+ selector:
+ app: kafka
+ ports:
+ - port: 9092
diff --git a/test/manifests/kafka/kustomization.yaml b/test/manifests/kafka/kustomization.yaml
new file mode 100644
index 0000000000..cbd4e4b88b
--- /dev/null
+++ b/test/manifests/kafka/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - kafka.yaml
+ - zookeeper.yaml
diff --git a/test/manifests/kafka/zookeeper.yaml b/test/manifests/kafka/zookeeper.yaml
new file mode 100644
index 0000000000..be0b5dfbb9
--- /dev/null
+++ b/test/manifests/kafka/zookeeper.yaml
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: zookeeper
+ name: zookeeper
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: zookeeper
+ template:
+ metadata:
+ labels:
+ app: zookeeper
+ spec:
+ containers:
+ - image: wurstmeister/zookeeper
+ imagePullPolicy: IfNotPresent
+ name: zookeeper
+ ports:
+ - containerPort: 2181
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: zookeeper
+ name: zookeeper
+spec:
+ type: NodePort
+ selector:
+ app: zookeeper
+ ports:
+ - name: zookeeper-port
+ port: 2181
+ nodePort: 30181
+ targetPort: 2181
diff --git a/test/util/util.go b/test/util/util.go
index 0b990f9d3c..1175e4486c 100644
--- a/test/util/util.go
+++ b/test/util/util.go
@@ -156,7 +156,7 @@ type podLogCheckOptions struct {
func defaultPodLogCheckOptions() *podLogCheckOptions {
return &podLogCheckOptions{
- timeout: 10 * time.Second,
+ timeout: 15 * time.Second,
count: -1,
}
}