chore: Remove deprovisioning.Reporter in favor of native eventing #204

Merged
24 changes: 13 additions & 11 deletions pkg/controllers/deprovisioning/consolidation.go
@@ -29,8 +29,10 @@ import (

"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
deprovisioningevents "github.com/aws/karpenter-core/pkg/controllers/deprovisioning/events"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/scheduling"
)
@@ -43,19 +45,19 @@ type consolidation struct {
kubeClient client.Client
provisioner *provisioning.Provisioner
cloudProvider cloudprovider.CloudProvider
reporter *Reporter
recorder events.Recorder
lastConsolidationState int64
}

func makeConsolidation(clock clock.Clock, cluster *state.Cluster, kubeClient client.Client, provisioner *provisioning.Provisioner,
cloudProvider cloudprovider.CloudProvider, reporter *Reporter) consolidation {
cloudProvider cloudprovider.CloudProvider, recorder events.Recorder) consolidation {
return consolidation{
clock: clock,
cluster: cluster,
kubeClient: kubeClient,
provisioner: provisioner,
cloudProvider: cloudProvider,
reporter: reporter,
recorder: recorder,
lastConsolidationState: 0,
}
}
@@ -79,7 +81,7 @@ func (c *consolidation) sortAndFilterCandidates(ctx context.Context, nodes []Can
// filter out nodes that can't be terminated
nodes = lo.Filter(nodes, func(cn CandidateNode, _ int) bool {
if reason, canTerminate := canBeTerminated(cn, pdbs); !canTerminate {
c.reporter.RecordUnconsolidatableReason(ctx, cn.Node, reason)
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(cn.Node, reason))
return false
}
return true
@@ -94,15 +96,15 @@
// ShouldDeprovision is a predicate used to filter deprovisionable nodes
func (c *consolidation) ShouldDeprovision(ctx context.Context, n *state.Node, provisioner *v1alpha5.Provisioner, _ []*v1.Pod) bool {
if val, ok := n.Annotations()[v1alpha5.DoNotConsolidateNodeAnnotationKey]; ok {
c.reporter.RecordUnconsolidatableReason(ctx, n.Node, fmt.Sprintf("%s annotation exists", v1alpha5.DoNotConsolidateNodeAnnotationKey))
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(n.Node, fmt.Sprintf("%s annotation exists", v1alpha5.DoNotConsolidateNodeAnnotationKey)))
return val != "true"
}
if provisioner == nil {
c.reporter.RecordUnconsolidatableReason(ctx, n.Node, "provisioner is unknown")
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(n.Node, "provisioner is unknown"))
return false
}
if provisioner.Spec.Consolidation == nil || !ptr.BoolValue(provisioner.Spec.Consolidation.Enabled) {
c.reporter.RecordUnconsolidatableReason(ctx, n.Node, fmt.Sprintf("provisioner %s has consolidation disabled", provisioner.Name))
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(n.Node, fmt.Sprintf("provisioner %s has consolidation disabled", provisioner.Name)))
return false
}
return true
@@ -191,7 +193,7 @@ func (c *consolidation) computeConsolidation(ctx context.Context, nodes ...Candi
if !allPodsScheduled {
// This method is used by multi-node consolidation as well, so we'll only report in the single node case
if len(nodes) == 1 {
c.reporter.RecordUnconsolidatableReason(ctx, nodes[0].Node, "not all pods would schedule")
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(nodes[0].Node, "not all pods would schedule"))
}
return Command{action: actionDoNothing}, nil
}
@@ -207,7 +209,7 @@ func (c *consolidation) computeConsolidation(ctx context.Context, nodes ...Candi
// we're not going to turn a single node into multiple nodes
if len(newNodes) != 1 {
if len(nodes) == 1 {
c.reporter.RecordUnconsolidatableReason(ctx, nodes[0].Node, fmt.Sprintf("can't remove without creating %d nodes", len(newNodes)))
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(nodes[0].Node, fmt.Sprintf("can't remove without creating %d nodes", len(newNodes))))
}
return Command{action: actionDoNothing}, nil
}
@@ -221,7 +223,7 @@ func (c *consolidation) computeConsolidation(ctx context.Context, nodes ...Candi
newNodes[0].InstanceTypeOptions = filterByPrice(newNodes[0].InstanceTypeOptions, newNodes[0].Requirements, nodesPrice)
if len(newNodes[0].InstanceTypeOptions) == 0 {
if len(nodes) == 1 {
c.reporter.RecordUnconsolidatableReason(ctx, nodes[0].Node, "can't replace with a cheaper node")
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(nodes[0].Node, "can't replace with a cheaper node"))
}
// no instance types remain after filtering by price
return Command{action: actionDoNothing}, nil
@@ -240,7 +242,7 @@ func (c *consolidation) computeConsolidation(ctx context.Context, nodes ...Candi
if allExistingAreSpot &&
newNodes[0].Requirements.Get(v1alpha5.LabelCapacityType).Has(v1alpha5.CapacityTypeSpot) {
if len(nodes) == 1 {
c.reporter.RecordUnconsolidatableReason(ctx, nodes[0].Node, "can't replace a spot node with a spot node")
c.recorder.Publish(deprovisioningevents.UnconsolidatableReason(nodes[0].Node, "can't replace a spot node with a spot node"))
}
return Command{action: actionDoNothing}, nil
}
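For context on the new dependency, here is a minimal sketch of the events API the consolidation code now talks to. The Event fields mirror the pkg/events/recorder.go hunk further down; the InvolvedObject type and the single-method Recorder interface are assumptions rather than lines taken from this PR.

```go
package events

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/util/flowcontrol"
)

// Event mirrors the struct extended in pkg/events/recorder.go below;
// DedupeTimeout is the field this PR adds.
type Event struct {
	InvolvedObject runtime.Object // assumed type
	Type           string
	Reason         string
	Message        string
	DedupeValues   []string
	DedupeTimeout  time.Duration
	RateLimiter    flowcontrol.RateLimiter
}

// Recorder is what consolidation receives in place of the deleted *Reporter:
// call sites now do recorder.Publish(deprovisioningevents.UnconsolidatableReason(node, reason)).
type Recorder interface {
	Publish(evt Event)
}
```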
9 changes: 3 additions & 6 deletions pkg/controllers/deprovisioning/controller.go
@@ -52,7 +52,6 @@ type Controller struct {
clock clock.Clock
cloudProvider cloudprovider.CloudProvider
deprovisioners []Deprovisioner
reporter *Reporter
}

// pollingPeriod at which we inspect the cluster to look for opportunities to deprovision
@@ -73,14 +72,12 @@ var waitRetryOptions = []retry.Option{
func NewController(clk clock.Clock, kubeClient client.Client, provisioner *provisioning.Provisioner,
cp cloudprovider.CloudProvider, recorder events.Recorder, cluster *state.Cluster) *Controller {

reporter := NewReporter(recorder)
return &Controller{
clock: clk,
kubeClient: kubeClient,
cluster: cluster,
provisioner: provisioner,
recorder: recorder,
reporter: reporter,
cloudProvider: cp,
deprovisioners: []Deprovisioner{
// Expire any nodes that must be deleted, allowing their pods to potentially land on currently
@@ -90,11 +87,11 @@ func NewController(clk clock.Clock, kubeClient client.Client, provisioner *provi
// Delete any remaining empty nodes as there is zero cost in terms of disruption. Emptiness and
// emptyNodeConsolidation are mutually exclusive; only one of these will operate
NewEmptiness(clk),
NewEmptyNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter),
NewEmptyNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder),
// Attempt to identify multiple nodes that we can consolidate simultaneously to reduce pod churn
NewMultiNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter),
NewMultiNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder),
// And finally fall back to our single node consolidation to further reduce cluster cost.
NewSingleNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter),
NewSingleNodeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder),
},
}
}
5 changes: 3 additions & 2 deletions pkg/controllers/deprovisioning/emptynodeconsolidation.go
@@ -28,6 +28,7 @@ import (
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)

// EmptyNodeConsolidation is the consolidation controller that performs multi-node consolidation of entirely empty nodes
@@ -36,8 +37,8 @@ type EmptyNodeConsolidation struct {
}

func NewEmptyNodeConsolidation(clk clock.Clock, cluster *state.Cluster, kubeClient client.Client,
provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, reporter *Reporter) *EmptyNodeConsolidation {
return &EmptyNodeConsolidation{consolidation: makeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter)}
provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, recorder events.Recorder) *EmptyNodeConsolidation {
return &EmptyNodeConsolidation{consolidation: makeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder)}
}

// ComputeCommand generates a deprovisioning command given deprovisionable nodes
2 changes: 2 additions & 0 deletions pkg/controllers/deprovisioning/events/events.go
@@ -16,6 +16,7 @@ package events

import (
"fmt"
"time"

v1 "k8s.io/api/core/v1"

@@ -69,5 +70,6 @@ func UnconsolidatableReason(node *v1.Node, reason string) events.Event {
Reason: "Unconsolidatable",
Message: reason,
DedupeValues: []string{node.Name},
DedupeTimeout: time.Minute * 15,
}
}
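Stitched together, the updated helper reads roughly as follows. Only the DedupeTimeout line is new in this hunk; the InvolvedObject and Type fields sit outside the visible diff context and are assumptions here.

```go
// UnconsolidatableReason emits a deduped event explaining why a node was skipped
// for consolidation; with the 15-minute dedupe window it repeats at most once
// per node per 15 minutes.
func UnconsolidatableReason(node *v1.Node, reason string) events.Event {
	return events.Event{
		InvolvedObject: node,               // assumed: the event is attached to the node
		Type:           v1.EventTypeNormal, // assumed
		Reason:         "Unconsolidatable",
		Message:        reason,
		DedupeValues:   []string{node.Name},
		DedupeTimeout:  time.Minute * 15,
	}
}
```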
5 changes: 3 additions & 2 deletions pkg/controllers/deprovisioning/multinodeconsolidation.go
@@ -27,15 +27,16 @@ import (
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)

type MultiNodeConsolidation struct {
consolidation
}

func NewMultiNodeConsolidation(clk clock.Clock, cluster *state.Cluster, kubeClient client.Client,
provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, reporter *Reporter) *MultiNodeConsolidation {
return &MultiNodeConsolidation{makeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter)}
provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, recorder events.Recorder) *MultiNodeConsolidation {
return &MultiNodeConsolidation{makeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder)}
}

func (m *MultiNodeConsolidation) ComputeCommand(ctx context.Context, candidates ...CandidateNode) (Command, error) {
53 changes: 0 additions & 53 deletions pkg/controllers/deprovisioning/reporter.go

This file was deleted.

5 changes: 3 additions & 2 deletions pkg/controllers/deprovisioning/singlenodeconsolidation.go
@@ -25,6 +25,7 @@ import (
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)

// SingleNodeConsolidation is the consolidation controller that performs single node consolidation.
@@ -33,8 +34,8 @@ type SingleNodeConsolidation struct {
}

func NewSingleNodeConsolidation(clk clock.Clock, cluster *state.Cluster, kubeClient client.Client, provisioner *provisioning.Provisioner,
cp cloudprovider.CloudProvider, reporter *Reporter) *SingleNodeConsolidation {
return &SingleNodeConsolidation{consolidation: makeConsolidation(clk, cluster, kubeClient, provisioner, cp, reporter)}
cp cloudprovider.CloudProvider, recorder events.Recorder) *SingleNodeConsolidation {
return &SingleNodeConsolidation{consolidation: makeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder)}
}

// ComputeCommand generates a deprovisioning command given deprovisionable nodes
16 changes: 12 additions & 4 deletions pkg/events/recorder.go
@@ -31,6 +31,7 @@ type Event struct {
Reason string
Message string
DedupeValues []string
DedupeTimeout time.Duration
RateLimiter flowcontrol.RateLimiter
}

@@ -50,17 +51,24 @@ type recorder struct {
cache *cache.Cache
}

const defaultDedupeTimeout = 2 * time.Minute

func NewRecorder(r record.EventRecorder) Recorder {
return &recorder{
rec: r,
cache: cache.New(120*time.Second, 10*time.Second),
cache: cache.New(defaultDedupeTimeout, 10*time.Second),
}
}

// Publish creates a Kubernetes event using the passed event struct
func (r *recorder) Publish(evt Event) {
// Override the timeout if one is set for an event
timeout := defaultDedupeTimeout
if evt.DedupeTimeout != 0 {
timeout = evt.DedupeTimeout
}
// Dedupe same events that involve the same object and are close together
if len(evt.DedupeValues) > 0 && !r.shouldCreateEvent(evt.dedupeKey()) {
if len(evt.DedupeValues) > 0 && !r.shouldCreateEvent(evt.dedupeKey(), timeout) {
return
}
// If the event is rate-limited, then validate we should create the event
@@ -70,10 +78,10 @@
r.rec.Event(evt.InvolvedObject, evt.Type, evt.Reason, evt.Message)
}

func (r *recorder) shouldCreateEvent(key string) bool {
func (r *recorder) shouldCreateEvent(key string, timeout time.Duration) bool {
if _, exists := r.cache.Get(key); exists {
return false
}
r.cache.SetDefault(key, nil)
r.cache.Set(key, nil, timeout)
return true
}
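The per-event override rides on the TTL cache already used for dedupe; the API matches github.com/patrickmn/go-cache, which is assumed to be the library behind cache.Cache here. A small standalone sketch of the default versus overridden window:

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

const defaultDedupeTimeout = 2 * time.Minute

// shouldCreateEvent mirrors the recorder logic above: the first publish for a key
// stores it with that event's timeout, and repeats within the window are suppressed.
func shouldCreateEvent(c *cache.Cache, key string, timeout time.Duration) bool {
	if _, exists := c.Get(key); exists {
		return false
	}
	c.Set(key, nil, timeout)
	return true
}

func main() {
	c := cache.New(defaultDedupeTimeout, 10*time.Second)

	// Default window: a repeat within 2 minutes is deduped.
	fmt.Println(shouldCreateEvent(c, "evict/pod-a", defaultDedupeTimeout)) // true
	fmt.Println(shouldCreateEvent(c, "evict/pod-a", defaultDedupeTimeout)) // false

	// Overridden window, as Unconsolidatable events now use 15 minutes.
	fmt.Println(shouldCreateEvent(c, "unconsolidatable/node-1", 15*time.Minute)) // true
}
```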
16 changes: 16 additions & 0 deletions pkg/events/suite_test.go
@@ -103,6 +103,22 @@ var _ = Describe("Dedupe", func() {
}
Expect(internalRecorder.Calls(events.EvictPod(PodWithUID()).Reason)).To(Equal(1))
})
It("should allow the dedupe timeout to be overridden", func() {
pod := PodWithUID()
evt := events.EvictPod(pod)
evt.DedupeTimeout = time.Second * 2

// Generate a set of events within the dedupe timeout
for i := 0; i < 10; i++ {
eventRecorder.Publish(evt)
}
Expect(internalRecorder.Calls(events.EvictPod(PodWithUID()).Reason)).To(Equal(1))

// Wait until after the overridden dedupe timeout
time.Sleep(time.Second * 3)
eventRecorder.Publish(evt)
Expect(internalRecorder.Calls(events.EvictPod(PodWithUID()).Reason)).To(Equal(2))
})
It("should allow events with different entities to be created", func() {
for i := 0; i < 100; i++ {
eventRecorder.Publish(events.EvictPod(PodWithUID()))