Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(kuma-cp): make store changes processing more reliable (backport of #6728) #6763

Merged
merged 2 commits into from
May 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 21 additions & 7 deletions pkg/events/eventbus.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,36 +2,45 @@ package events

import (
"sync"

"github.com/kumahq/kuma/pkg/core"
)

// NewEventBus returns an EventBus with its subscriber map already
// initialized, so Subscribe can insert entries without panicking on a
// nil-map write.
func NewEventBus() *EventBus {
	return &EventBus{
		subscribers: map[string]chan Event{},
	}
}

// EventBus fans events out to every currently registered subscriber.
// Subscribers are keyed by a UUID so an individual subscription can be
// removed when its Listener is closed.
type EventBus struct {
	mtx         sync.RWMutex          // guards subscribers
	subscribers map[string]chan Event // subscription ID -> delivery channel
}

func (b *EventBus) New() Listener {
func (b *EventBus) Subscribe() Listener {
id := core.NewUUID()
b.mtx.Lock()
defer b.mtx.Unlock()

events := make(chan Event, 10)
b.subscribers = append(b.subscribers, events)
b.subscribers[id] = events
return &reader{
events: events,
close: func() {
b.mtx.Lock()
defer b.mtx.Unlock()
delete(b.subscribers, id)
},
}
}

func (b *EventBus) Send(event Event) {
b.mtx.RLock()
defer b.mtx.RUnlock()

switch e := event.(type) {
case ResourceChangedEvent:
for _, s := range b.subscribers {
s <- ResourceChangedEvent{
for _, channel := range b.subscribers {
channel <- ResourceChangedEvent{
Operation: e.Operation,
Type: e.Type,
Key: e.Key,
Expand All @@ -42,8 +51,13 @@ func (b *EventBus) Send(event Event) {

// reader is the Listener implementation handed out by EventBus.Subscribe.
type reader struct {
	events chan Event // channel on which the bus delivers events
	close  func()     // removes this subscription from the bus
}

// Recv exposes the delivery channel to the consumer.
func (r *reader) Recv() <-chan Event {
	return r.events
}

// Close unregisters this reader from the event bus.
func (r *reader) Close() {
	r.close()
}
3 changes: 2 additions & 1 deletion pkg/events/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,13 @@ var ListenerStoppedErr = errors.New("listener closed")

// Listener receives events from an event stream. Close must be called
// when the listener is no longer needed so its subscription is released.
type Listener interface {
	Recv() <-chan Event
	Close()
}

// Emitter publishes events to all current listeners.
type Emitter interface {
	Send(Event)
}

// ListenerFactory creates subscriptions to an event stream.
type ListenerFactory interface {
	Subscribe() Listener
}
3 changes: 2 additions & 1 deletion pkg/insights/resyncer.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,8 @@ func (r *resyncer) Start(stop <-chan struct{}) error {
}
}(stop)

eventReader := r.eventFactory.New()
eventReader := r.eventFactory.Subscribe()
defer eventReader.Close()
for {
select {
case <-stop:
Expand Down
9 changes: 8 additions & 1 deletion pkg/insights/test/test_event_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,17 @@ func (t *TestEventReader) Recv() <-chan events.Event {
return t.Ch
}

// Close closes the underlying test channel.
// This method should be called only once. In tests, we
// have only one call to subscribe, and we want to avoid
// closing the channel twice, as it may lead to a panic.
func (t *TestEventReader) Close() {
	close(t.Ch)
}

// TestEventReaderFactory is a test double that always hands out the
// same pre-configured reader from Subscribe.
type TestEventReaderFactory struct {
	Reader *TestEventReader
}

func (t *TestEventReaderFactory) New() events.Listener {
func (t *TestEventReaderFactory) Subscribe() events.Listener {
return t.Reader
}
5 changes: 3 additions & 2 deletions pkg/kds/global/components.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"github.com/kumahq/kuma/pkg/core/resources/registry"
"github.com/kumahq/kuma/pkg/core/resources/store"
"github.com/kumahq/kuma/pkg/core/runtime"
"github.com/kumahq/kuma/pkg/core/runtime/component"
"github.com/kumahq/kuma/pkg/core/user"
"github.com/kumahq/kuma/pkg/kds/client"
"github.com/kumahq/kuma/pkg/kds/mux"
Expand Down Expand Up @@ -73,13 +74,13 @@ func Setup(rt runtime.Runtime) (err error) {
}()
return nil
})
return rt.Add(mux.NewServer(
return rt.Add(component.NewResilientComponent(kdsGlobalLog, mux.NewServer(
onSessionStarted,
rt.KDSContext().GlobalServerFilters,
*rt.Config().Multizone.Global.KDS,
rt.Metrics(),
service.NewGlobalKDSServiceServer(rt.KDSContext().EnvoyAdminRPCs),
))
)))
}

func createZoneIfAbsent(name string, resManager manager.ResourceManager) error {
Expand Down